// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
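// Illustrative relationship between the fields above (all numbers hypothetical): in a
// 1_000_000 sat channel where we hold 600_000_000 msat and each side keeps a 10_000 sat
// reserve, `balance_msat` is 600_000_000, `outbound_capacity_msat` is roughly
// 600_000_000 - 10_000_000 (less commitment fees if we funded the channel), and
// `next_outbound_htlc_limit_msat` is additionally capped by the counterparty's
// `max_htlc_value_in_flight_msat` and dust limits.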
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

#[derive(Clone)]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 4,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 8,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	ChannelReady = 64,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 4096,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
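// The channel state is stored as a single `u32` combining one of the base states above with
// zero or more flag bits. A sketch of the composition (values hypothetical):
//
//     let mut channel_state = ChannelState::FundingSent as u32;
//     channel_state |= ChannelState::TheirChannelReady as u32;
//     let their_ready = channel_state & (ChannelState::TheirChannelReady as u32) != 0;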
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
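// Since `STATE_FLAGS` collects every flag bit, masking it off recovers the base state. This
// is the idiom used by e.g. `have_received_message` and `is_funding_broadcast` below
// (value hypothetical):
//
//     let channel_state = ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32;
//     assert_eq!(channel_state & !STATE_FLAGS, ChannelState::FundingSent as u32);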
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
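// A commitment transaction's expected weight is the base weight plus 172 weight units per
// non-dust HTLC. Worked example (hypothetical channel): a non-anchors commitment with 3
// non-dust HTLCs weighs about 724 + 3 * 172 = 1240 WU, so at 2500 sat/kW the commitment
// fee is roughly 1240 * 2500 / 1000 = 3100 sats.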
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
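// For example (hypothetical channel): under the legacy 10% default, a 1_000_000 sat channel
// advertises a `holder_max_htlc_value_in_flight_msat` of
// 1_000_000_000 msat * 10 / 100 = 100_000_000 msat.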
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
420 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
421 /// our counterparty or not. However, we don't want to announce updates right away to avoid
422 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
423 /// our channel_update message and track the current state here.
424 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
425 #[derive(Clone, Copy, PartialEq)]
426 pub(super) enum ChannelUpdateStatus {
427 /// We've announced the channel as enabled and are connected to our peer.
429 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
431 /// Our channel is live again, but we haven't announced the channel as enabled yet.
433 /// We've announced the channel as disabled.
437 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
439 pub enum AnnouncementSigsState {
440 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
441 /// we sent the last `AnnouncementSignatures`.
443 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
444 /// This state never appears on disk - instead we write `NotSent`.
446 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
447 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
448 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
449 /// they send back a `RevokeAndACK`.
450 /// This state never appears on disk - instead we write `NotSent`.
452 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
453 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
457 /// An enum indicating whether the local or remote side offered a given HTLC.
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
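// Minimal usage sketch (amount hypothetical): affordability checks elsewhere in this module
// construct a candidate for a prospective HTLC and ask what the next commitment transaction
// fee would be were it added, e.g. via `next_local_commit_tx_fee_msat`:
//
//     let candidate = HTLCCandidate::new(10_000_000, HTLCInitiator::LocalOffered);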
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub funding_created: Option<msgs::FundingCreated>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
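// Worked example (hypothetical feerate): at a current commitment feerate of 1000 sat/kW,
// outbound HTLC affordability is checked as though the feerate were
// 1000 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 2000 sat/kW, so a later doubling of the
// feerate cannot by itself push the initiator below their reserve.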
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;

/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});

/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond once this counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
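// With the one-minute tick interval assumed throughout this module, an unfunded channel
// therefore gets roughly UNFUNDED_CHANNEL_AGE_LIMIT_TICKS = 60 minutes to reach the funded
// state before `should_expire_unfunded_channel` starts returning true.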
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsState` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay the closing negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}

	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
			return ChannelShutdownState::ShutdownComplete;
		}
		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
			return ChannelShutdownState::ShutdownInitiated;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
			return ChannelShutdownState::ResolvingHTLCs;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
			return ChannelShutdownState::NegotiatingClosingFee;
		}
		return ChannelShutdownState::NotShuttingDown;
	}

	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() &&
		self.pending_outbound_htlcs.is_empty() &&
		self.pending_update_fee.is_none() &&
		self.channel_state &
		(BOTH_SIDES_SHUTDOWN_MASK |
			ChannelState::AwaitingRemoteRevoke as u32 |
			ChannelState::PeerDisconnected as u32 |
			ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
	}
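	// In other words, closing negotiation is ready only once no HTLCs or fee updates are
	// pending, both shutdown messages have been exchanged, and no blocking flags are set.
	// Illustrative states (hypothetical): `ChannelReady | LocalShutdownSent |
	// RemoteShutdownSent` satisfies the mask comparison above, while the same state with
	// `AwaitingRemoteRevoke` also set does not.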
	/// Returns true if this channel is currently available for use. This is a superset of
	/// is_usable() and considers things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}

	// Public utilities:

	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}

	// Return the `temporary_channel_id` used during channel establishment.
	//
	// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}

	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}

	/// Gets the "user_id" value passed into the construction of this channel. It has no special
	/// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}

	/// Gets the channel's type
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}

	/// Gets the channel's `short_channel_id`.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}

	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}

	/// Returns the holder signer for this channel.
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		return &self.holder_signer
	}

	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}

	/// Returns the height in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
		let conf_height = self.funding_tx_confirmation_height;
		if conf_height > 0 {
			Some(conf_height)
		} else {
			None
		}
	}

	/// Returns the block hash in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}

	/// Returns the current number of confirmations on the funding transaction.
	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
		if self.funding_tx_confirmation_height == 0 {
			// We either haven't seen any confirmation yet, or observed a reorg.
			return 0;
		}

		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	}
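	// Worked example (hypothetical heights): if the funding transaction confirmed at height
	// 100_000 and the current tip is 100_005, this returns 100_005 - 100_000 + 1 = 6
	// confirmations; a tip below the confirmation height (mid-reorg) saturates to 0 via
	// `checked_sub`.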
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_announced_htlc_max_msat(&self) -> u64 {
		return cmp::min(
			// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
			// to use full capacity. This is an effort to reduce routing failures, because in many cases
			// channel might have been used to route very small values (either by honest users or as DoS).
			self.channel_value_satoshis * 1000 * 9 / 10,
			self.counterparty_max_htlc_value_in_flight_msat
		);
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}

	fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
		self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
			let holder_reserve = self.holder_selected_channel_reserve_satoshis;
			cmp::min(
				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
				party_max_htlc_value_in_flight_msat
			)
		})
	}
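	// Worked example (hypothetical values): in a 1_000_000 sat channel with a 10_000 sat
	// reserve on each side and a party max-in-flight of 500_000_000 msat, this yields
	// min((1_000_000 - 10_000 - 10_000) * 1000, 500_000_000) = 500_000_000 msat.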
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}

	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}

	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}

	pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
		fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
	where F::Target: FeeEstimator
	{
		match self.config.options.max_dust_htlc_exposure {
			MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
				let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
					ConfirmationTarget::OnChainSweep) as u64;
				feerate_per_kw.saturating_mul(multiplier)
			},
			MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
		}
	}
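	// Worked example (hypothetical config and feerate): with
	// `MaxDustHTLCExposure::FeeRateMultiplier(5000)` and an estimated sweep feerate of
	// 1000 sat/kW, the maximum allowed dust exposure is 1000 * 5000 = 5_000_000 msat.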
1232 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1233 pub fn prev_config(&self) -> Option<ChannelConfig> {
1234 self.prev_config.map(|prev_config| prev_config.0)
1237 // Checks whether we should emit a `ChannelPending` event.
1238 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1239 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1242 // Returns whether we already emitted a `ChannelPending` event.
1243 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1244 self.channel_pending_event_emitted
1247 // Remembers that we already emitted a `ChannelPending` event.
1248 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1249 self.channel_pending_event_emitted = true;
1252 // Checks whether we should emit a `ChannelReady` event.
1253 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1254 self.is_usable() && !self.channel_ready_event_emitted
1257 // Remembers that we already emitted a `ChannelReady` event.
1258 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1259 self.channel_ready_event_emitted = true;
1262 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1263 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1264 /// no longer be considered when forwarding HTLCs.
1265 pub fn maybe_expire_prev_config(&mut self) {
		if self.prev_config.is_none() {
			return;
		}
		let prev_config = self.prev_config.as_mut().unwrap();
		prev_config.1 += 1;
		if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
			self.prev_config = None;
		}
	}
1276 /// Returns the current [`ChannelConfig`] applied to the channel.
	pub fn config(&self) -> ChannelConfig {
		self.config.options
	}
	/// Updates the channel's config. Returns a bool indicating whether the applied config
	/// update resulted in a new `ChannelUpdate` message.
1283 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1284 let did_channel_update =
1285 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1286 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1287 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1288 if did_channel_update {
1289 self.prev_config = Some((self.config.options, 0));
1290 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1291 // policy change to propagate throughout the network.
1292 self.update_time_counter += 1;
1294 self.config.options = *config;
1298 /// Returns true if funding_signed was sent/received and the
1299 /// funding transaction has been broadcast if necessary.
1300 pub fn is_funding_broadcast(&self) -> bool {
1301 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1302 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1305 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1306 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1307 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1308 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
	/// such, a transaction is generally the result of b increasing the amount paid to a (or adding
	/// an HTLC to it).
1311 /// @local is used only to convert relevant internal structures which refer to remote vs local
1312 /// to decide value of outputs and direction of HTLCs.
1313 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1314 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1315 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1316 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1317 /// which peer generated this transaction and "to whom" this transaction flows.
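	/// For example, `local == true` with `generated_by_local == false` builds our own commitment
	/// transaction as the counterparty is currently proposing it, and thus includes HTLCs the
	/// counterparty has announced to us even if we have not yet fully committed to them.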
1319 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1320 where L::Target: Logger
1322 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1323 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1324 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1326 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1327 let mut remote_htlc_total_msat = 0;
1328 let mut local_htlc_total_msat = 0;
1329 let mut value_to_self_msat_offset = 0;
1331 let mut feerate_per_kw = self.feerate_per_kw;
1332 if let Some((feerate, update_state)) = self.pending_update_fee {
1333 if match update_state {
1334 // Note that these match the inclusion criteria when scanning
1335 // pending_inbound_htlcs below.
1336 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1337 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1338 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1340 feerate_per_kw = feerate;
1344 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1345 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1346 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1348 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1350 macro_rules! get_htlc_in_commitment {
1351 ($htlc: expr, $offered: expr) => {
1352 HTLCOutputInCommitment {
1354 amount_msat: $htlc.amount_msat,
1355 cltv_expiry: $htlc.cltv_expiry,
1356 payment_hash: $htlc.payment_hash,
1357 transaction_output_index: None
1362 macro_rules! add_htlc_output {
1363 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1364 if $outbound == local { // "offered HTLC output"
1365 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1366 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1369 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1371 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1372 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1373 included_non_dust_htlcs.push((htlc_in_tx, $source));
1375 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1376 included_dust_htlcs.push((htlc_in_tx, $source));
1379 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1380 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1383 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1385 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1386 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1387 included_non_dust_htlcs.push((htlc_in_tx, $source));
1389 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1390 included_dust_htlcs.push((htlc_in_tx, $source));
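					// Illustrative (assuming the pre-anchors HTLC-success weight of 703 WU):
					// at 2500 sat/kW the second-stage fee is 2500 * 703 / 1000 = 1757 sats, so
					// with a 546 sat dust limit any received HTLC under 2303 sats lands in
					// `included_dust_htlcs` on this commitment transaction.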
1396 for ref htlc in self.pending_inbound_htlcs.iter() {
1397 let (include, state_name) = match htlc.state {
1398 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1399 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1400 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1401 InboundHTLCState::Committed => (true, "Committed"),
1402 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1406 add_htlc_output!(htlc, false, None, state_name);
1407 remote_htlc_total_msat += htlc.amount_msat;
1409 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1411 &InboundHTLCState::LocalRemoved(ref reason) => {
1412 if generated_by_local {
1413 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1414 value_to_self_msat_offset += htlc.amount_msat as i64;
1423 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1425 for ref htlc in self.pending_outbound_htlcs.iter() {
1426 let (include, state_name) = match htlc.state {
1427 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1428 OutboundHTLCState::Committed => (true, "Committed"),
1429 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1430 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1431 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1434 let preimage_opt = match htlc.state {
1435 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1436 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1437 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1441 if let Some(preimage) = preimage_opt {
1442 preimages.push(preimage);
1446 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1447 local_htlc_total_msat += htlc.amount_msat;
1449 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1451 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1452 value_to_self_msat_offset -= htlc.amount_msat as i64;
1454 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1455 if !generated_by_local {
1456 value_to_self_msat_offset -= htlc.amount_msat as i64;
1464 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1465 assert!(value_to_self_msat >= 0);
1466 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1467 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1468 // "violate" their reserve value by couting those against it. Thus, we have to convert
1469 // everything to i64 before subtracting as otherwise we can overflow.
1470 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1471 assert!(value_to_remote_msat >= 0);
1473 #[cfg(debug_assertions)]
1475 // Make sure that the to_self/to_remote is always either past the appropriate
1476 // channel_reserve *or* it is making progress towards it.
1477 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1478 self.holder_max_commitment_tx_output.lock().unwrap()
1480 self.counterparty_max_commitment_tx_output.lock().unwrap()
1482 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1483 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1484 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1485 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1488 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1489 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1490 let (value_to_self, value_to_remote) = if self.is_outbound() {
1491 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1493 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1496 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1497 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1498 let (funding_pubkey_a, funding_pubkey_b) = if local {
1499 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1501 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1504 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1505 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1510 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1511 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1516 let num_nondust_htlcs = included_non_dust_htlcs.len();
1518 let channel_parameters =
1519 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1520 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1521 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1528 &mut included_non_dust_htlcs,
1531 let mut htlcs_included = included_non_dust_htlcs;
1532 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1533 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1534 htlcs_included.append(&mut included_dust_htlcs);
		// For the stats, trim the to_self/to_remote values to 0 msat if they're below the
		// broadcaster's dust limit (i.e. they do not materialize as an output).
		value_to_self_msat = if value_to_self_msat / 1000 < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
		value_to_remote_msat = if value_to_remote_msat / 1000 < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1546 local_balance_msat: value_to_self_msat as u64,
1547 remote_balance_msat: value_to_remote_msat as u64,
1553 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1554 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1555 /// our counterparty!)
1556 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
	/// TODO: Some way to enforce this at compile time via the type system?
1558 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1559 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1560 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1561 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1562 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1564 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1568 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1569 /// will sign and send to our counterparty.
1570 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1571 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1572 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1573 //may see payments to it!
1574 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1575 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1576 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1578 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1581 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1582 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1583 /// Panics if called before accept_channel/InboundV1Channel::new
1584 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1585 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1588 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1589 &self.get_counterparty_pubkeys().funding_pubkey
	pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
		self.feerate_per_kw
	}
1596 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1597 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
		// may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1599 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1600 // more dust balance if the feerate increases when we have several HTLCs pending
1601 // which are near the dust limit.
1602 let mut feerate_per_kw = self.feerate_per_kw;
1603 // If there's a pending update fee, use it to ensure we aren't under-estimating
1604 // potential feerate updates coming soon.
1605 if let Some((feerate, _)) = self.pending_update_fee {
1606 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1608 if let Some(feerate) = outbound_feerate_update {
1609 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
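		// Illustrative: the line below yields max(2530, 1250) = 2530 sat/kW at a current
		// feerate of 1000 sat/kW, and max(2530, 12_500) = 12_500 sat/kW at 10_000 sat/kW;
		// the 25% bump dominates once the feerate exceeds 2024 sat/kW.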
		cmp::max(2530, feerate_per_kw * 1250 / 1000)
	}
1614 /// Get forwarding information for the counterparty.
1615 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1616 self.counterparty_forwarding_info.clone()
	/// Returns an HTLCStats about inbound pending HTLCs.
	fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
		let context = self;
		let mut stats = HTLCStats {
1623 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1624 pending_htlcs_value_msat: 0,
1625 on_counterparty_tx_dust_exposure_msat: 0,
1626 on_holder_tx_dust_exposure_msat: 0,
1627 holding_cell_msat: 0,
1628 on_holder_tx_holding_cell_htlcs_count: 0,
1631 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1634 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1635 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1636 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1638 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1639 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1640 for ref htlc in context.pending_inbound_htlcs.iter() {
1641 stats.pending_htlcs_value_msat += htlc.amount_msat;
1642 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1643 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1645 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1646 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
	/// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
	fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
		let context = self;
		let mut stats = HTLCStats {
1656 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1657 pending_htlcs_value_msat: 0,
1658 on_counterparty_tx_dust_exposure_msat: 0,
1659 on_holder_tx_dust_exposure_msat: 0,
1660 holding_cell_msat: 0,
1661 on_holder_tx_holding_cell_htlcs_count: 0,
1664 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1667 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1668 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1669 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1671 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1672 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1673 for ref htlc in context.pending_outbound_htlcs.iter() {
1674 stats.pending_htlcs_value_msat += htlc.amount_msat;
1675 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1676 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1678 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1679 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1683 for update in context.holding_cell_htlc_updates.iter() {
1684 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1685 stats.pending_htlcs += 1;
1686 stats.pending_htlcs_value_msat += amount_msat;
1687 stats.holding_cell_msat += amount_msat;
1688 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1689 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1691 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1692 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1694 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1701 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1702 /// Doesn't bother handling the
1703 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1704 /// corner case properly.
1705 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1706 -> AvailableBalances
1707 where F::Target: FeeEstimator
1709 let context = &self;
1710 // Note that we have to handle overflow due to the above case.
1711 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1712 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1714 let mut balance_msat = context.value_to_self_msat;
1715 for ref htlc in context.pending_inbound_htlcs.iter() {
1716 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1717 balance_msat += htlc.amount_msat;
1720 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1722 let outbound_capacity_msat = context.value_to_self_msat
1723 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1725 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1727 let mut available_capacity_msat = outbound_capacity_msat;
1729 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1730 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1734 if context.is_outbound() {
1735 // We should mind channel commit tx fee when computing how much of the available capacity
1736 // can be used in the next htlc. Mirrors the logic in send_htlc.
1738 // The fee depends on whether the amount we will be sending is above dust or not,
			// and the answer will in turn change the amount itself, making it a circular
			// dependency.
			// This complicates the computation around dust-values, up to the one-htlc-value.
1742 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1743 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1744 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1747 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1748 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1749 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1750 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1751 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1752 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1753 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1756 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1757 // value ends up being below dust, we have this fee available again. In that case,
1758 // match the value to right-below-dust.
1759 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1760 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1761 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1762 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1763 debug_assert!(one_htlc_difference_msat != 0);
1764 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1765 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1766 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1768 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1771 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1772 // sending a new HTLC won't reduce their balance below our reserve threshold.
1773 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1774 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1775 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1778 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1779 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1781 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1782 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1783 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1785 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1786 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1787 // we've selected for them, we can only send dust HTLCs.
1788 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
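				// Illustrative (assuming the pre-anchors HTLC-success weight of 703 WU): with a
				// 354 sat counterparty dust limit at 2500 sat/kW, real_dust_limit_success_sat is
				// 354 + 2500 * 703 / 1000 = 2111 sats, so we'd cap ourselves to sending at most
				// 2_110_999 msat here.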
1792 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1794 // If we get close to our maximum dust exposure, we end up in a situation where we can send
		// between zero and the remaining dust exposure limit, OR above the dust limit.
1796 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1797 // send above the dust limit (as the router can always overpay to meet the dust limit).
1798 let mut remaining_msat_below_dust_exposure_limit = None;
1799 let mut dust_exposure_dust_limit_msat = 0;
1800 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1802 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1803 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1805 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1806 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1807 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1809 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1810 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1811 remaining_msat_below_dust_exposure_limit =
1812 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1813 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1816 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1817 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1818 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1819 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1820 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1821 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1824 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1825 if available_capacity_msat < dust_exposure_dust_limit_msat {
1826 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1828 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1832 available_capacity_msat = cmp::min(available_capacity_msat,
1833 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1835 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1836 available_capacity_msat = 0;
1840 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1841 - context.value_to_self_msat as i64
1842 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
				- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
			0) as u64,
1845 outbound_capacity_msat,
1846 next_outbound_htlc_limit_msat: available_capacity_msat,
1847 next_outbound_htlc_minimum_msat,
1852 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1853 let context = &self;
1854 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1857 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1858 /// number of pending HTLCs that are on track to be in our next commitment tx.
1860 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1861 /// `fee_spike_buffer_htlc` is `Some`.
1863 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1864 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1866 /// Dust HTLCs are excluded.
1867 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1868 let context = &self;
1869 assert!(context.is_outbound());
1871 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1874 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1875 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1877 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1878 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1880 let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
1883 HTLCInitiator::LocalOffered => {
1884 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1888 HTLCInitiator::RemoteOffered => {
1889 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1895 let mut included_htlcs = 0;
1896 for ref htlc in context.pending_inbound_htlcs.iter() {
1897 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1900 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1901 // transaction including this HTLC if it times out before they RAA.
1902 included_htlcs += 1;
1905 for ref htlc in context.pending_outbound_htlcs.iter() {
1906 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1910 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1911 OutboundHTLCState::Committed => included_htlcs += 1,
1912 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1913 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1914 // transaction won't be generated until they send us their next RAA, which will mean
1915 // dropping any HTLCs in this state.
1920 for htlc in context.holding_cell_htlc_updates.iter() {
1922 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1923 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1928 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1929 // ack we're guaranteed to never include them in commitment txs anymore.
1933 let num_htlcs = included_htlcs + addl_htlcs;
1934 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
1935 #[cfg(any(test, fuzzing))]
1938 if fee_spike_buffer_htlc.is_some() {
1939 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1941 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1942 + context.holding_cell_htlc_updates.len();
1943 let commitment_tx_info = CommitmentTxInfoCached {
1945 total_pending_htlcs,
1946 next_holder_htlc_id: match htlc.origin {
1947 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1948 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1950 next_counterparty_htlc_id: match htlc.origin {
1951 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1952 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1954 feerate: context.feerate_per_kw,
1956 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1961 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1962 /// pending HTLCs that are on track to be in their next commitment tx
1964 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1965 /// `fee_spike_buffer_htlc` is `Some`.
1967 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1968 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1970 /// Dust HTLCs are excluded.
1971 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1972 let context = &self;
1973 assert!(!context.is_outbound());
1975 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1978 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1979 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1981 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1982 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1984 let mut addl_htlcs = 0;
		if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		match htlc.origin {
1987 HTLCInitiator::LocalOffered => {
1988 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1992 HTLCInitiator::RemoteOffered => {
1993 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1999 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2000 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2001 // committed outbound HTLCs, see below.
2002 let mut included_htlcs = 0;
2003 for ref htlc in context.pending_inbound_htlcs.iter() {
2004 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2007 included_htlcs += 1;
2010 for ref htlc in context.pending_outbound_htlcs.iter() {
2011 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
			// We only include an outbound HTLC if it will not be included in their next
			// commitment_signed, i.e. if they've responded to us with an RAA after announcement.
2017 OutboundHTLCState::Committed => included_htlcs += 1,
2018 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2019 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2024 let num_htlcs = included_htlcs + addl_htlcs;
2025 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2026 #[cfg(any(test, fuzzing))]
2029 if fee_spike_buffer_htlc.is_some() {
2030 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2032 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2033 let commitment_tx_info = CommitmentTxInfoCached {
2035 total_pending_htlcs,
2036 next_holder_htlc_id: match htlc.origin {
2037 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2038 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2040 next_counterparty_htlc_id: match htlc.origin {
2041 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2042 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2044 feerate: context.feerate_per_kw,
2046 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2051 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2052 where F: Fn() -> Option<O> {
2053 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2054 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2061 /// Returns the transaction if there is a pending funding transaction that is yet to be
2063 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2064 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2067 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2069 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2070 self.if_unbroadcasted_funding(||
2071 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2075 /// Returns whether the channel is funded in a batch.
2076 pub fn is_batch_funding(&self) -> bool {
2077 self.is_batch_funding.is_some()
2080 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2082 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2083 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2086 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2087 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2088 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2089 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2090 /// immediately (others we will have to allow to time out).
2091 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2092 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2093 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
		// being fully configured in some cases. Thus, it's likely any monitor events we generate will
2095 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2096 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2098 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2099 // return them to fail the payment.
2100 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2101 let counterparty_node_id = self.get_counterparty_node_id();
2102 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2104 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2105 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2110 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2111 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2112 // returning a channel monitor update here would imply a channel monitor update before
2113 // we even registered the channel monitor to begin with, which is invalid.
2114 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2115 // funding transaction, don't return a funding txo (which prevents providing the
2116 // monitor update to the user, even if we return one).
2117 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2118 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2119 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2120 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2121 update_id: self.latest_monitor_update_id,
2122 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2126 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2128 self.channel_state = ChannelState::ShutdownComplete as u32;
2129 self.update_time_counter += 1;
2132 dropped_outbound_htlcs,
2133 unbroadcasted_batch_funding_txid,
2137 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2138 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2139 let counterparty_keys = self.build_remote_transaction_keys();
2140 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2141 let signature = match &self.holder_signer {
2142 // TODO (taproot|arik): move match into calling method for Taproot
2143 ChannelSignerType::Ecdsa(ecdsa) => {
2144 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2145 .map(|(sig, _)| sig).ok()?
2147 // TODO (taproot|arik)
2152 if self.signer_pending_funding {
2153 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2154 self.signer_pending_funding = false;
2157 Some(msgs::FundingCreated {
2158 temporary_channel_id: self.temporary_channel_id.unwrap(),
2159 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2160 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2163 partial_signature_with_nonce: None,
2165 next_local_nonce: None,
2169 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2170 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2171 let counterparty_keys = self.build_remote_transaction_keys();
2172 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2174 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2175 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2176 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2177 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2179 match &self.holder_signer {
2180 // TODO (arik): move match into calling method for Taproot
2181 ChannelSignerType::Ecdsa(ecdsa) => {
2182 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2183 .map(|(signature, _)| msgs::FundingSigned {
2184 channel_id: self.channel_id(),
2187 partial_signature_with_nonce: None,
2191 if funding_signed.is_none() {
2192 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2193 self.signer_pending_funding = true;
2194 } else if self.signer_pending_funding {
2195 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2196 self.signer_pending_funding = false;
				// We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2200 (counterparty_initial_commitment_tx, funding_signed)
2202 // TODO (taproot|arik)
2209 // Internal utility functions for channels
2211 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2212 /// `channel_value_satoshis` in msat, set through
2213 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2215 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
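/// For example, a 1_000_000 sat channel configured at 10% yields
/// 1_000_000 * 10 * 10 = 100_000_000 msat; the factor of 10 below is
/// sat * 1000 (msat per sat) * percent / 100, simplified.
///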
2217 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2218 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2219 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2221 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
		config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
	};
	channel_value_satoshis * 10 * configured_percent
}
2229 /// Returns a minimum channel reserve value the remote needs to maintain,
2230 /// required by us according to the configured or default
2231 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2233 /// Guaranteed to return a value no larger than channel_value_satoshis
2235 /// This is used both for outbound and inbound channels and has lower bound
2236 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2237 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2238 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
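	// For example, `their_channel_reserve_proportional_millionths = 10_000` (i.e. 1%) on a
	// 1_000_000 sat channel yields a 10_000 sat reserve; a 50_000 sat channel would instead be
	// floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS, and the result never exceeds the channel
	// value itself.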
2239 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2242 /// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2244 /// from storage. Hence, we use this function to not persist default values of
2245 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2246 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2247 let (q, _) = channel_value_satoshis.overflowing_div(100);
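	// e.g. a 1_000_000 sat channel gets a 10_000 sat reserve (1%), a 50_000 sat channel is
	// floored at 1000 sats, and a channel smaller than 1000 sats is capped at its full value.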
2248 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2251 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2252 // Note that num_htlcs should not include dust HTLCs.
2254 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
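	// Worked example (assuming the pre-anchors commitment base weight of 724 WU and 172 WU per
	// HTLC): at 2500 sat/kW with two non-dust HTLCs the fee is
	// 2500 * (724 + 2 * 172) / 1000 = 2670 sats.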
	feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
}
2258 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2259 // Note that num_htlcs should not include dust HTLCs.
2260 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2261 // Note that we need to divide before multiplying to round properly,
2262 // since the lowest denomination of bitcoin on-chain is the satoshi.
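	// e.g. at 253 sat/kW with one non-dust HTLC (pre-anchors weights: 724 + 172 = 896 WU),
	// 896 * 253 = 226_688; dividing then multiplying yields 226_688 / 1000 * 1000 = 226_000 msat,
	// i.e. the fee is a whole number of satoshis rather than 226_688 msat.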
	(commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
}
2266 // Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the other channel participant.
2268 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2269 pub context: ChannelContext<SP>,
2272 #[cfg(any(test, fuzzing))]
2273 struct CommitmentTxInfoCached {
2275 total_pending_htlcs: usize,
2276 next_holder_htlc_id: u64,
2277 next_counterparty_htlc_id: u64,
2281 impl<SP: Deref> Channel<SP> where
2282 SP::Target: SignerProvider,
2283 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2285 fn check_remote_fee<F: Deref, L: Deref>(
2286 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2287 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2288 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2290 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2291 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2293 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2295 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2296 if feerate_per_kw < lower_limit {
2297 if let Some(cur_feerate) = cur_feerate_per_kw {
2298 if feerate_per_kw > cur_feerate {
2300 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2301 cur_feerate, feerate_per_kw);
2305 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2311 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2312 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2313 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2314 // outside of those situations will fail.
2315 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2319 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2324 1 + // script length (0)
2328 )*4 + // * 4 for non-witness parts
2329 2 + // witness marker and flag
2330 1 + // witness element count
2331 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2332 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2333 2*(1 + 71); // two signatures + sighash type flags
2334 if let Some(spk) = a_scriptpubkey {
2335 ret += ((8+1) + // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
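		// e.g. a standard P2WPKH scriptpubkey is 22 bytes, so each present output adds
		// ((8 + 1) + 22) * 4 = 124 weight units to this upper-bound estimate.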
2338 if let Some(spk) = b_scriptpubkey {
2339 ret += ((8+1) + // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		ret
	}
2346 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2347 assert!(self.context.pending_inbound_htlcs.is_empty());
2348 assert!(self.context.pending_outbound_htlcs.is_empty());
2349 assert!(self.context.pending_update_fee.is_none());
2351 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2352 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2353 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2355 if value_to_holder < 0 {
2356 assert!(self.context.is_outbound());
2357 total_fee_satoshis += (-value_to_holder) as u64;
2358 } else if value_to_counterparty < 0 {
2359 assert!(!self.context.is_outbound());
2360 total_fee_satoshis += (-value_to_counterparty) as u64;
2363 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2364 value_to_counterparty = 0;
2367 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2368 value_to_holder = 0;
2371 assert!(self.context.shutdown_scriptpubkey.is_some());
2372 let holder_shutdown_script = self.get_closing_scriptpubkey();
2373 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2374 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2376 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2377 (closing_transaction, total_fee_satoshis)
2380 fn funding_outpoint(&self) -> OutPoint {
2381 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
2387 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2388 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	/// disconnected).
2392 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2393 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2394 where L::Target: Logger {
2395 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2396 // (see equivalent if condition there).
2397 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2398 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2399 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2400 self.context.latest_monitor_update_id = mon_update_id;
2401 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2402 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2406 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2407 // Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (because we wouldn't have accepted an
2409 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2411 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2412 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2414 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2416 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2418 // these, but for now we just have to treat them as normal.
2420 let mut pending_idx = core::usize::MAX;
2421 let mut htlc_value_msat = 0;
2422 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2423 if htlc.htlc_id == htlc_id_arg {
2424 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2425 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2426 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2428 InboundHTLCState::Committed => {},
2429 InboundHTLCState::LocalRemoved(ref reason) => {
2430 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2432 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2433 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2435 return UpdateFulfillFetch::DuplicateClaim {};
2438 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2439 // Don't return in release mode here so that we can update channel_monitor
2443 htlc_value_msat = htlc.amount_msat;
2447 if pending_idx == core::usize::MAX {
2448 #[cfg(any(test, fuzzing))]
2449 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2450 // this is simply a duplicate claim, not previously failed and we lost funds.
2451 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2452 return UpdateFulfillFetch::DuplicateClaim {};
2455 // Now update local state:
2457 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2458 // can claim it even if the channel hits the chain before we see their next commitment.
2459 self.context.latest_monitor_update_id += 1;
2460 let monitor_update = ChannelMonitorUpdate {
2461 update_id: self.context.latest_monitor_update_id,
2462 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2463 payment_preimage: payment_preimage_arg.clone(),
2467 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2468 // Note that this condition is the same as the assertion in
2469 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2470 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2471 // do not get into this branch.
2472 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2473 match pending_update {
2474 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2475 if htlc_id_arg == htlc_id {
2476 // Make sure we don't leave latest_monitor_update_id incremented here:
2477 self.context.latest_monitor_update_id -= 1;
2478 #[cfg(any(test, fuzzing))]
2479 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2480 return UpdateFulfillFetch::DuplicateClaim {};
2483 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2484 if htlc_id_arg == htlc_id {
2485 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2486 // TODO: We may actually be able to switch to a fulfill here, though it's
2487 // rare enough it may not be worth the complexity burden.
2488 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2489 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2495 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2496 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2497 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2499 #[cfg(any(test, fuzzing))]
2500 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2501 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2503 #[cfg(any(test, fuzzing))]
2504 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2507 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2508 if let InboundHTLCState::Committed = htlc.state {
2510 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2511 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2513 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2514 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2517 UpdateFulfillFetch::NewClaim {
2520 msg: Some(msgs::UpdateFulfillHTLC {
2521 channel_id: self.context.channel_id(),
2522 htlc_id: htlc_id_arg,
2523 payment_preimage: payment_preimage_arg,
2528 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2529 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2530 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2531 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2532 // Even if we aren't supposed to let new monitor updates with commitment state
2533 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2534 // matter what. Sadly, to push a new monitor update which flies before others
2535 // already queued, we have to insert it into the pending queue and update the
2536 // update_ids of all the following monitors.
2537 if release_cs_monitor && msg.is_some() {
2538 let mut additional_update = self.build_commitment_no_status_check(logger);
2539 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
2540 // to be strictly increasing by one, so decrement it here.
2541 self.context.latest_monitor_update_id = monitor_update.update_id;
2542 monitor_update.updates.append(&mut additional_update.updates);
2544 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2545 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2546 monitor_update.update_id = new_mon_id;
2547 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2548 held_update.update.update_id += 1;
2551 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2552 let update = self.build_commitment_no_status_check(logger);
2553 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2559 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2560 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2562 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
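// A worked example of the renumbering above (illustrative ids only): with
// blocked updates carrying ids 8 and 9, `latest_monitor_update_id` is 9 and
// the fresh claim is first assigned id 10. It is then re-assigned the first
// blocked id (8) so it flies ahead of the queue, and the blocked updates are
// bumped to 9 and 10, keeping ids strictly increasing in release order.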
2566 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2567 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2568 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2569 /// before we fail backwards.
2571 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2572 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2573 /// [`ChannelError::Ignore`].
2574 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2575 -> Result<(), ChannelError> where L::Target: Logger {
2576 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2577 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
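// A minimal usage sketch (hypothetical caller; `htlc_id` and
// `onion_error_packet` are assumptions):
//
//     match chan.queue_fail_htlc(htlc_id, onion_error_packet, &logger) {
//         Ok(()) => {}, // the failure now sits in the holding cell
//         Err(ChannelError::Ignore(msg)) => log_trace!(logger, "{}", msg),
//         Err(_) => unreachable!("fail_htlc only returns Ignore errors"),
//     }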
2580 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2581 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2582 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2583 /// before we fail backwards.
2585 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2586 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2587 /// [`ChannelError::Ignore`].
2588 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2589 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2590 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2591 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2593 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2595 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2596 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2597 // these, but for now we just have to treat them as normal.
2599 let mut pending_idx = core::usize::MAX;
2600 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2601 if htlc.htlc_id == htlc_id_arg {
2603 InboundHTLCState::Committed => {},
2604 InboundHTLCState::LocalRemoved(ref reason) => {
2605 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2607 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2612 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2613 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2619 if pending_idx == core::usize::MAX {
2620 #[cfg(any(test, fuzzing))]
2621 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2622 // is simply a duplicate fail, not one that was previously failed (which would mean we failed-back too early).
2623 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2627 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2628 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2629 force_holding_cell = true;
2632 // Now update local state:
2633 if force_holding_cell {
2634 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2635 match pending_update {
2636 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2637 if htlc_id_arg == htlc_id {
2638 #[cfg(any(test, fuzzing))]
2639 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2643 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2644 if htlc_id_arg == htlc_id {
2645 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2646 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2652 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2653 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2654 htlc_id: htlc_id_arg,
2660 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2662 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2663 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2666 Ok(Some(msgs::UpdateFailHTLC {
2667 channel_id: self.context.channel_id(),
2668 htlc_id: htlc_id_arg,
2673 // Message handlers:
2675 /// Handles a funding_signed message from the remote end.
2676 /// If this call is successful, broadcast the funding transaction (and not before!)
2677 pub fn funding_signed<L: Deref>(
2678 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2679 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2683 if !self.context.is_outbound() {
2684 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2686 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2687 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2689 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2690 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2691 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2692 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2695 let funding_script = self.context.get_funding_redeemscript();
2697 let counterparty_keys = self.context.build_remote_transaction_keys();
2698 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2699 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2700 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2702 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2703 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2705 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2706 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2708 let trusted_tx = initial_commitment_tx.trust();
2709 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2710 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2711 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2712 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2713 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2717 let holder_commitment_tx = HolderCommitmentTransaction::new(
2718 initial_commitment_tx,
2721 &self.context.get_holder_pubkeys().funding_pubkey,
2722 self.context.counterparty_funding_pubkey()
2725 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2726 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2729 let funding_redeemscript = self.context.get_funding_redeemscript();
2730 let funding_txo = self.context.get_funding_txo().unwrap();
2731 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2732 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2733 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2734 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2735 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2736 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2737 shutdown_script, self.context.get_holder_selected_contest_delay(),
2738 &self.context.destination_script, (funding_txo, funding_txo_script),
2739 &self.context.channel_transaction_parameters,
2740 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2742 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2744 channel_monitor.provide_initial_counterparty_commitment_tx(
2745 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2746 self.context.cur_counterparty_commitment_transaction_number,
2747 self.context.counterparty_cur_commitment_point.unwrap(),
2748 counterparty_initial_commitment_tx.feerate_per_kw(),
2749 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2750 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
2752 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitors yet, so no update can have failed!
2753 if self.context.is_batch_funding() {
2754 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2756 self.context.channel_state = ChannelState::FundingSent as u32;
2758 self.context.cur_holder_commitment_transaction_number -= 1;
2759 self.context.cur_counterparty_commitment_transaction_number -= 1;
2761 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2763 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2764 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
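// A caller-side sketch (hypothetical `persister`, `broadcaster`, and
// `funding_tx`; the persistence API here is an assumption, not this file's):
//
//     let monitor = chan.funding_signed(&msg, best_block, &signer_provider, &logger)?;
//     persister.persist_new_channel(funding_txo, &monitor);
//     // Only once the monitor is durably persisted is broadcasting safe:
//     broadcaster.broadcast_transactions(&[&funding_tx]);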
2768 /// Updates the state of the channel to indicate that all channels in the batch have received
2769 /// funding_signed and persisted their monitors.
2770 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2771 /// treated as a non-batch channel going forward.
2772 pub fn set_batch_ready(&mut self) {
2773 self.context.is_batch_funding = None;
2774 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
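// Intended flow, per the doc comment above (sketch; `batch_channels` is a
// hypothetical collection): once *every* channel in the batch has had its
// `funding_signed` handled and its monitor persisted, the caller runs
//
//     for chan in batch_channels.iter_mut() { chan.set_batch_ready(); }
//
// after which the shared funding transaction may be broadcast.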
2777 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2778 /// and the channel is now usable (and public), this may generate an announcement_signatures to send to our peer.
2780 pub fn channel_ready<NS: Deref, L: Deref>(
2781 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2782 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2783 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2785 NS::Target: NodeSigner,
2788 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2789 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2790 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2793 if let Some(scid_alias) = msg.short_channel_id_alias {
2794 if Some(scid_alias) != self.context.short_channel_id {
2795 // The scid alias provided can be used to route payments *from* our counterparty,
2796 // i.e. can be used for inbound payments and provided in invoices, but is not used
2797 // when routing outbound payments.
2798 self.context.latest_inbound_scid_alias = Some(scid_alias);
2802 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2804 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2805 // batch, but we can receive channel_ready messages.
2807 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2808 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
2810 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2811 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2812 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2813 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2814 self.context.update_time_counter += 1;
2815 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2816 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2817 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2818 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2820 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2821 // required, or they're sending a fresh SCID alias.
2822 let expected_point =
2823 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2824 // If they haven't ever sent an updated point, the point they send should match the current one.
2826 self.context.counterparty_cur_commitment_point
2827 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2828 // If we've advanced the commitment number once, the second commitment point is
2829 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2830 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2831 self.context.counterparty_prev_commitment_point
2833 // If they have sent updated points, channel_ready is always supposed to match
2834 // their "first" point, which we re-derive here.
2835 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2836 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2837 ).expect("We already advanced, so previous secret keys should have been validated already")))
2839 if expected_point != Some(msg.next_per_commitment_point) {
2840 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2844 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2847 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2848 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2850 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2852 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
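// Sketch of how a caller might consume the return value (hypothetical event
// queue; the exact event type used here is an assumption):
//
//     if let Some(sigs) = chan.channel_ready(&msg, &node_signer, chain_hash,
//         &user_config, &best_block, &logger)? {
//         pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
//             node_id: counterparty_node_id, msg: sigs });
//     }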
2855 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2856 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2857 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2858 ) -> Result<(), ChannelError>
2859 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2860 FE::Target: FeeEstimator, L::Target: Logger,
2862 // We can't accept HTLCs sent after we've sent a shutdown.
2863 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2864 if local_sent_shutdown {
2865 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2867 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2868 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2869 if remote_sent_shutdown {
2870 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2872 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2873 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2875 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2876 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2878 if msg.amount_msat == 0 {
2879 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2881 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2882 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2885 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2886 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2887 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2888 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2890 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2891 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2894 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least
2895 // keep the reserve_satoshis we told them to always hold as a direct payment, ensuring they lose
2896 // something if we punish them for broadcasting an old state).
2897 // Note that we don't really care about having a small/no to_remote output in our local
2898 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2899 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2900 // present in the next commitment transaction we send them (at least for fulfilled ones,
2901 // failed ones won't modify value_to_self).
2902 // Note that we will send HTLCs which another instance of rust-lightning would think
2903 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2904 // Channel state once they will not be present in the next received commitment transaction).
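// For example: if a 50_000 msat HTLC we offered was fulfilled by the
// counterparty and merely awaits irrevocable removal, those msats already
// belong to them, so they are credited via removed_outbound_total_msat below
// rather than letting the stale value_to_self_msat spuriously reject the add.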
2906 let mut removed_outbound_total_msat = 0;
2907 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2908 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2909 removed_outbound_total_msat += htlc.amount_msat;
2910 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2911 removed_outbound_total_msat += htlc.amount_msat;
2915 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2916 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2919 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2920 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2921 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2923 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2924 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2925 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2926 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2927 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2928 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2929 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2933 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2934 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2935 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2936 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2937 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2938 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2939 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
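// Illustrative arithmetic (made-up feerate): at a dust buffer feerate of
// 5_000 sat/kW on a non-anchor channel, htlc_success_tx_weight is 703 WU, so
// htlc_success_dust_limit = 5_000 * 703 / 1000 = 3_515 sats. With a
// holder_dust_limit_satoshis of 546 sats, any HTLC below 4_061 sats counts
// toward the on-holder-tx dust exposure checked above.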
2943 let pending_value_to_self_msat =
2944 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2945 let pending_remote_value_msat =
2946 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2947 if pending_remote_value_msat < msg.amount_msat {
2948 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2951 // Check that the remote can afford to pay for this HTLC on-chain at the current
2952 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2954 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2955 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2956 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
2958 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2959 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2963 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2964 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2966 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2967 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
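// A worked example of the two checks above (illustrative values, channel
// funded by the counterparty): with pending_remote_value_msat = 100_000_000,
// an incoming 90_000_000 msat HTLC, no anchors, remote_commit_tx_fee_msat =
// 5_000_000, and a 10_000 sat reserve: 100M - 90M = 10M msat covers the 5M
// fee, but 10M - 5M = 5M msat falls short of the 10M msat reserve, so the
// add is rejected.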
2971 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2972 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2976 if !self.context.is_outbound() {
2977 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
2978 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
2979 // side, only on the sender's. Note that with anchor outputs we are no longer as
2980 // sensitive to fee spikes, so we don't need to account for them.
2981 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2982 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
2983 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2984 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2986 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
2987 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
2988 // the HTLC, i.e. its status is already set to failing.
2989 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
2990 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2993 // Check that they won't violate our local required channel reserve by adding this HTLC.
2994 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2995 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
2996 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
2997 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3000 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3001 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3003 if msg.cltv_expiry >= 500000000 {
3004 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
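// Per Bitcoin's nLockTime consensus rules, lock-time values of 500_000_000 or
// more are interpreted as UNIX timestamps rather than block heights, and BOLT 2
// requires `cltv_expiry` to be a block height, hence the hard cutoff above.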
3007 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3008 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3009 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3013 // Now update local state:
3014 self.context.next_counterparty_htlc_id += 1;
3015 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3016 htlc_id: msg.htlc_id,
3017 amount_msat: msg.amount_msat,
3018 payment_hash: msg.payment_hash,
3019 cltv_expiry: msg.cltv_expiry,
3020 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3025 /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
3027 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3028 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3029 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3030 if htlc.htlc_id == htlc_id {
3031 let outcome = match check_preimage {
3032 None => fail_reason.into(),
3033 Some(payment_preimage) => {
3034 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3035 if payment_hash != htlc.payment_hash {
3036 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3038 OutboundHTLCOutcome::Success(Some(payment_preimage))
3042 OutboundHTLCState::LocalAnnounced(_) =>
3043 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3044 OutboundHTLCState::Committed => {
3045 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3047 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3048 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3053 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3056 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3057 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3058 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3060 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3061 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3064 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3067 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3068 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3069 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3071 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3072 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3075 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3079 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3080 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3081 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3083 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3084 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3087 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3091 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3092 where L::Target: Logger
3094 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3095 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3097 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3098 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3100 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3101 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3104 let funding_script = self.context.get_funding_redeemscript();
3106 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3108 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3109 let commitment_txid = {
3110 let trusted_tx = commitment_stats.tx.trust();
3111 let bitcoin_tx = trusted_tx.built_transaction();
3112 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3114 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3115 log_bytes!(msg.signature.serialize_compact()[..]),
3116 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3117 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3118 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3119 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3123 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3125 // If our counterparty updated the channel fee in this commitment transaction, check that
3126 // they can actually afford the new fee now.
3127 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3128 update_state == FeeUpdateState::RemoteAnnounced
3131 debug_assert!(!self.context.is_outbound());
3132 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3133 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3134 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3137 #[cfg(any(test, fuzzing))]
3139 if self.context.is_outbound() {
3140 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3141 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3142 if let Some(info) = projected_commit_tx_info {
3143 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3144 + self.context.holding_cell_htlc_updates.len();
3145 if info.total_pending_htlcs == total_pending_htlcs
3146 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3147 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3148 && info.feerate == self.context.feerate_per_kw {
3149 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3155 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3156 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3159 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3160 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3161 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3162 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3163 // backwards compatibility, we never use it in production. To provide test coverage, here,
3164 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3165 #[allow(unused_assignments, unused_mut)]
3166 let mut separate_nondust_htlc_sources = false;
3167 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3168 use core::hash::{BuildHasher, Hasher};
3169 // Get a random value using the only std API to do so - the DefaultHasher
3170 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3171 separate_nondust_htlc_sources = rand_val % 2 == 0;
3174 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3175 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3176 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3177 if let Some(_) = htlc.transaction_output_index {
3178 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3179 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3180 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3182 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3183 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3184 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3185 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3186 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3187 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3188 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3189 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3191 if !separate_nondust_htlc_sources {
3192 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3195 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3197 if separate_nondust_htlc_sources {
3198 if let Some(source) = source_opt.take() {
3199 nondust_htlc_sources.push(source);
3202 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3205 let holder_commitment_tx = HolderCommitmentTransaction::new(
3206 commitment_stats.tx,
3208 msg.htlc_signatures.clone(),
3209 &self.context.get_holder_pubkeys().funding_pubkey,
3210 self.context.counterparty_funding_pubkey()
3213 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3214 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3216 // Update state now that we've passed all the can-fail calls...
3217 let mut need_commitment = false;
3218 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3219 if *update_state == FeeUpdateState::RemoteAnnounced {
3220 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3221 need_commitment = true;
3225 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3226 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3227 Some(forward_info.clone())
3229 if let Some(forward_info) = new_forward {
3230 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3231 &htlc.payment_hash, &self.context.channel_id);
3232 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3233 need_commitment = true;
3236 let mut claimed_htlcs = Vec::new();
3237 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3238 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3239 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3240 &htlc.payment_hash, &self.context.channel_id);
3241 // Grab the preimage, if it exists, instead of cloning
3242 let mut reason = OutboundHTLCOutcome::Success(None);
3243 mem::swap(outcome, &mut reason);
3244 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3245 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3246 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3247 // have a `Success(None)` reason. In this case we could forget some HTLC
3248 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3249 // fixes a bug which the user was exposed to on 0.0.104 when they started the claim.
3251 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3253 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3254 need_commitment = true;
3258 self.context.latest_monitor_update_id += 1;
3259 let mut monitor_update = ChannelMonitorUpdate {
3260 update_id: self.context.latest_monitor_update_id,
3261 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3262 commitment_tx: holder_commitment_tx,
3263 htlc_outputs: htlcs_and_sigs,
3265 nondust_htlc_sources,
3269 self.context.cur_holder_commitment_transaction_number -= 1;
3270 self.context.expecting_peer_commitment_signed = false;
3271 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3272 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3273 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3275 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3276 // In case we initially failed monitor updating without requiring a response, we need
3277 // to make sure the RAA gets sent first.
3278 self.context.monitor_pending_revoke_and_ack = true;
3279 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3280 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3281 // the corresponding HTLC status updates so that
3282 // get_last_commitment_update_for_send includes the right HTLCs.
3283 self.context.monitor_pending_commitment_signed = true;
3284 let mut additional_update = self.build_commitment_no_status_check(logger);
3285 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3286 // strictly increasing by one, so decrement it here.
3287 self.context.latest_monitor_update_id = monitor_update.update_id;
3288 monitor_update.updates.append(&mut additional_update.updates);
3290 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3291 &self.context.channel_id);
3292 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3295 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3296 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3297 // we'll send one right away when we get the revoke_and_ack when we
3298 // free_holding_cell_htlcs().
3299 let mut additional_update = self.build_commitment_no_status_check(logger);
3300 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3301 // strictly increasing by one, so decrement it here.
3302 self.context.latest_monitor_update_id = monitor_update.update_id;
3303 monitor_update.updates.append(&mut additional_update.updates);
3307 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3308 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3309 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3310 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3313 /// Public version of the below, checking relevant preconditions first.
3314 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3315 /// returns `(None, Vec::new())`.
3316 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3317 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3318 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3319 where F::Target: FeeEstimator, L::Target: Logger
3321 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3322 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3323 self.free_holding_cell_htlcs(fee_estimator, logger)
3324 } else { (None, Vec::new()) }
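// A minimal caller-side sketch (hypothetical surrounding code):
//
//     let (monitor_update_opt, failed_htlcs) =
//         chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
//     for (source, payment_hash) in failed_htlcs {
//         // fail these back upstream
//     }
//     if let Some(monitor_update) = monitor_update_opt {
//         // hand the update to the chain::Watch implementation
//     }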
3327 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3328 /// for our counterparty.
3329 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3330 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3331 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3332 where F::Target: FeeEstimator, L::Target: Logger
3334 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3335 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3336 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3337 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3339 let mut monitor_update = ChannelMonitorUpdate {
3340 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3341 updates: Vec::new(),
3344 let mut htlc_updates = Vec::new();
3345 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3346 let mut update_add_count = 0;
3347 let mut update_fulfill_count = 0;
3348 let mut update_fail_count = 0;
3349 let mut htlcs_to_fail = Vec::new();
3350 for htlc_update in htlc_updates.drain(..) {
3351 // Note that this *can* fail, though it should be due to rather-rare conditions on
3352 // fee races with adding too many outputs which push our total payments just over
3353 // the limit. In case it's less rare than I anticipate, we may want to revisit
3354 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3355 // to rebalance channels.
3356 match &htlc_update {
3357 &HTLCUpdateAwaitingACK::AddHTLC {
3358 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3359 skimmed_fee_msat, ..
3361 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3362 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3364 Ok(_) => update_add_count += 1,
3367 ChannelError::Ignore(ref msg) => {
3368 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3369 // If we fail to send here, then this HTLC should
3370 // be failed backwards. Failing to send here
3371 // indicates that this HTLC may keep being put back
3372 // into the holding cell without ever being
3373 // successfully forwarded/failed/fulfilled, causing
3374 // our counterparty to eventually close on us.
3375 htlcs_to_fail.push((source.clone(), *payment_hash));
3378 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3384 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3385 // If an HTLC claim was previously added to the holding cell (via
3386 // `get_update_fulfill_htlc`), then generating the claim message itself must
3387 // not fail - any in between attempts to claim the HTLC will have resulted
3388 // in it hitting the holding cell again and we cannot change the state of a
3389 // holding cell HTLC from fulfill to anything else.
3390 let mut additional_monitor_update =
3391 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3392 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3393 { monitor_update } else { unreachable!() };
3394 update_fulfill_count += 1;
3395 monitor_update.updates.append(&mut additional_monitor_update.updates);
3397 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3398 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3399 Ok(update_fail_msg_option) => {
3400 // If an HTLC failure was previously added to the holding cell (via
3401 // `queue_fail_htlc`) then generating the fail message itself must
3402 // not fail - we should never end up in a state where we double-fail
3403 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3404 // for a full revocation before failing.
3405 debug_assert!(update_fail_msg_option.is_some());
3406 update_fail_count += 1;
3409 if let ChannelError::Ignore(_) = e {}
3411 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3418 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3419 return (None, htlcs_to_fail);
3421 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3422 self.send_update_fee(feerate, false, fee_estimator, logger)
3427 let mut additional_update = self.build_commitment_no_status_check(logger);
3428 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3429 // but we want them to be strictly increasing by one, so reset it here.
3430 self.context.latest_monitor_update_id = monitor_update.update_id;
3431 monitor_update.updates.append(&mut additional_update.updates);
3433 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3434 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3435 update_add_count, update_fulfill_count, update_fail_count);
3437 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3438 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3444 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3445 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3446 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3447 /// generating an appropriate error *after* the channel state has been updated based on the
3448 /// revoke_and_ack message.
3449 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3450 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3451 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3452 where F::Target: FeeEstimator, L::Target: Logger,
3454 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3455 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3457 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3458 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3460 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3461 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3464 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3466 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3467 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3468 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3472 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3473 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3474 // haven't given them a new commitment transaction to broadcast). We should probably
3475 // take advantage of this by updating our channel monitor, sending them an error, and
3476 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3477 // lot of work, and there's some chance this is all a misunderstanding anyway.
3478 // We have to do *something*, though, since our signer may get mad at us for otherwise
3479 // jumping a remote commitment number, so best to just force-close and move on.
3480 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3483 #[cfg(any(test, fuzzing))]
3485 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3486 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3489 match &self.context.holder_signer {
3490 ChannelSignerType::Ecdsa(ecdsa) => {
3491 ecdsa.validate_counterparty_revocation(
3492 self.context.cur_counterparty_commitment_transaction_number + 1,
3494 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3496 // TODO (taproot|arik)
3501 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3502 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3503 self.context.latest_monitor_update_id += 1;
3504 let mut monitor_update = ChannelMonitorUpdate {
3505 update_id: self.context.latest_monitor_update_id,
3506 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3507 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3508 secret: msg.per_commitment_secret,
3512 // Update state now that we've passed all the can-fail calls...
3513 // (note that we may still fail to generate the new commitment_signed message, but that's
3514 // OK: we step the channel here and *then*, if the new generation fails, we can fail the
3515 // channel based on that; stepping forward here is safe either way).
3516 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3517 self.context.sent_message_awaiting_response = None;
3518 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3519 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3520 self.context.cur_counterparty_commitment_transaction_number -= 1;
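// Commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER, so the
// decrement above moves us forward to the counterparty's next (newer) commitment state.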
3522 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3523 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3526 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3527 let mut to_forward_infos = Vec::new();
3528 let mut revoked_htlcs = Vec::new();
3529 let mut finalized_claimed_htlcs = Vec::new();
3530 let mut update_fail_htlcs = Vec::new();
3531 let mut update_fail_malformed_htlcs = Vec::new();
3532 let mut require_commitment = false;
3533 let mut value_to_self_msat_diff: i64 = 0;
3536 // Take references explicitly so that we can hold multiple references to self.context.
3537 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3538 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3539 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3541 // We really shouldn't need two passes here, but retain() gives a non-mutable ref (Rust bug)
3542 pending_inbound_htlcs.retain(|htlc| {
3543 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3544 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3545 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3546 value_to_self_msat_diff += htlc.amount_msat as i64;
3548 *expecting_peer_commitment_signed = true;
3552 pending_outbound_htlcs.retain(|htlc| {
3553 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3554 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3555 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3556 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3558 finalized_claimed_htlcs.push(htlc.source.clone());
3559 // They fulfilled, so we sent them money
3560 value_to_self_msat_diff -= htlc.amount_msat as i64;
3565 for htlc in pending_inbound_htlcs.iter_mut() {
3566 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3568 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3572 let mut state = InboundHTLCState::Committed;
3573 mem::swap(&mut state, &mut htlc.state);
3575 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3576 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3577 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3578 require_commitment = true;
3579 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3580 match forward_info {
3581 PendingHTLCStatus::Fail(fail_msg) => {
3582 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3583 require_commitment = true;
3585 HTLCFailureMsg::Relay(msg) => {
3586 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3587 update_fail_htlcs.push(msg)
3589 HTLCFailureMsg::Malformed(msg) => {
3590 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3591 update_fail_malformed_htlcs.push(msg)
3595 PendingHTLCStatus::Forward(forward_info) => {
3596 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3597 to_forward_infos.push((forward_info, htlc.htlc_id));
3598 htlc.state = InboundHTLCState::Committed;
3604 for htlc in pending_outbound_htlcs.iter_mut() {
3605 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3606 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3607 htlc.state = OutboundHTLCState::Committed;
3608 *expecting_peer_commitment_signed = true;
3610 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3611 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3612 // Grab the preimage, if it exists, instead of cloning
3613 let mut reason = OutboundHTLCOutcome::Success(None);
3614 mem::swap(outcome, &mut reason);
3615 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3616 require_commitment = true;
3620 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3622 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3623 match update_state {
3624 FeeUpdateState::Outbound => {
3625 debug_assert!(self.context.is_outbound());
3626 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3627 self.context.feerate_per_kw = feerate;
3628 self.context.pending_update_fee = None;
3629 self.context.expecting_peer_commitment_signed = true;
3631 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3632 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3633 debug_assert!(!self.context.is_outbound());
3634 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3635 require_commitment = true;
3636 self.context.feerate_per_kw = feerate;
3637 self.context.pending_update_fee = None;
3642 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3643 let release_state_str =
3644 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
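// A note on the flow below: if earlier monitor updates are still blocked, or the caller
// asked us to hold this one via `hold_mon_update`, the macro queues the update for later
// release instead of returning it for immediate application.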
3645 macro_rules! return_with_htlcs_to_fail {
3646 ($htlcs_to_fail: expr) => {
3647 if !release_monitor {
3648 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3649 update: monitor_update,
3651 return Ok(($htlcs_to_fail, None));
3653 return Ok(($htlcs_to_fail, Some(monitor_update)));
3658 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3659 // We can't actually generate a new commitment transaction (including by freeing holding
3660 // cells) while we can't update the monitor, so we just return what we have.
3661 if require_commitment {
3662 self.context.monitor_pending_commitment_signed = true;
3663 // When the monitor updating is restored we'll call
3664 // get_last_commitment_update_for_send(), which does not update state, but we're
3665 // definitely now awaiting a remote revoke before we can step forward any more, so
3666 // set it here.
3667 let mut additional_update = self.build_commitment_no_status_check(logger);
3668 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update
3669 // IDs to be strictly increasing by one, so reset it here.
3670 self.context.latest_monitor_update_id = monitor_update.update_id;
3671 monitor_update.updates.append(&mut additional_update.updates);
3673 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3674 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3675 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3676 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3677 return_with_htlcs_to_fail!(Vec::new());
3680 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3681 (Some(mut additional_update), htlcs_to_fail) => {
3682 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want
3683 // update IDs to be strictly increasing by one, so reset it here.
3684 self.context.latest_monitor_update_id = monitor_update.update_id;
3685 monitor_update.updates.append(&mut additional_update.updates);
3687 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3688 &self.context.channel_id(), release_state_str);
3690 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3691 return_with_htlcs_to_fail!(htlcs_to_fail);
3693 (None, htlcs_to_fail) => {
3694 if require_commitment {
3695 let mut additional_update = self.build_commitment_no_status_check(logger);
3697 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update
3698 // IDs to be strictly increasing by one, so reset it here.
3699 self.context.latest_monitor_update_id = monitor_update.update_id;
3700 monitor_update.updates.append(&mut additional_update.updates);
3702 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3703 &self.context.channel_id(),
3704 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3707 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3708 return_with_htlcs_to_fail!(htlcs_to_fail);
3710 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3711 &self.context.channel_id(), release_state_str);
3713 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3714 return_with_htlcs_to_fail!(htlcs_to_fail);
3720 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3721 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3722 /// commitment update.
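///
/// A minimal usage sketch (hypothetical local variables, not a tested doc-example):
///
/// ```ignore
/// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // Later, freeing the holding cell generates the update_fee/commitment_signed pair:
/// let _ = channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```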
3723 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3724 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3725 where F::Target: FeeEstimator, L::Target: Logger
3727 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3728 assert!(msg_opt.is_none(), "We forced holding cell?");
3731 /// Adds a pending update to this channel. See the doc for send_htlc for further details
3732 /// on why the return value is optional.
3733 /// If our balance is too low to cover the cost of the next commitment transaction at the
3734 /// new feerate, the update is cancelled.
3736 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3737 /// [`Channel`] if `force_holding_cell` is false.
3738 fn send_update_fee<F: Deref, L: Deref>(
3739 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3740 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3741 ) -> Option<msgs::UpdateFee>
3742 where F::Target: FeeEstimator, L::Target: Logger
3744 if !self.context.is_outbound() {
3745 panic!("Cannot send fee from inbound channel");
3747 if !self.context.is_usable() {
3748 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3750 if !self.context.is_live() {
3751 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3754 // Before proposing a feerate update, check that we can actually afford the new fee.
3755 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3756 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3757 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3758 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3759 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
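// For intuition (hypothetical numbers, using the non-anchor BOLT 3 weights of 724 WU base
// plus 172 WU per non-dust HTLC): at 2,500 sat/kW with 4 fee-counted HTLCs the buffer fee
// is 2,500 * (724 + 4 * 172) / 1,000 = 3,530 sats, i.e. 3,530,000 msat here.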
3760 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3761 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3762 //TODO: auto-close after a number of failures?
3763 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3767 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust thresholds at the proposed `feerate_per_kw`.
3768 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3769 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3770 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3771 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3772 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3775 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3776 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3780 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3781 force_holding_cell = true;
3784 if force_holding_cell {
3785 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3789 debug_assert!(self.context.pending_update_fee.is_none());
3790 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3792 Some(msgs::UpdateFee {
3793 channel_id: self.context.channel_id,
3798 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3799 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3800 /// resent.
3801 /// No further message handling calls may be made until a channel_reestablish dance has
3802 /// completed.
3803 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3804 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3805 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3806 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3810 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3811 // While the below code should be idempotent, it's simpler to just return early, as
3812 // redundant disconnect events can fire, though they should be rare.
3816 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3817 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3820 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3821 // will be retransmitted.
3822 self.context.last_sent_closing_fee = None;
3823 self.context.pending_counterparty_closing_signed = None;
3824 self.context.closing_fee_limits = None;
3826 let mut inbound_drop_count = 0;
3827 self.context.pending_inbound_htlcs.retain(|htlc| {
3829 InboundHTLCState::RemoteAnnounced(_) => {
3830 // They sent us an update_add_htlc but we never got the commitment_signed.
3831 // We'll tell them what commitment_signed we're expecting next and they'll drop
3832 // this HTLC accordingly
3833 inbound_drop_count += 1;
3836 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3837 // We received a commitment_signed updating this HTLC and (at least hopefully)
3838 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3839 // in response to it yet, so don't touch it.
3842 InboundHTLCState::Committed => true,
3843 InboundHTLCState::LocalRemoved(_) => {
3844 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3845 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3846 // (that we missed). Keep this around for now and if they tell us they missed
3847 // the commitment_signed we can re-transmit the update then.
3852 self.context.next_counterparty_htlc_id -= inbound_drop_count;
3854 if let Some((_, update_state)) = self.context.pending_update_fee {
3855 if update_state == FeeUpdateState::RemoteAnnounced {
3856 debug_assert!(!self.context.is_outbound());
3857 self.context.pending_update_fee = None;
3861 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3862 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3863 // They sent us an update to remove this but haven't yet sent the corresponding
3864 // commitment_signed, so we move it back to Committed; they can re-send the update
3865 // upon reconnection.
3866 htlc.state = OutboundHTLCState::Committed;
3870 self.context.sent_message_awaiting_response = None;
3872 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3873 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3877 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3878 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3879 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3880 /// update completes (potentially immediately).
3881 /// The messages which were generated with the monitor update must *not* have been sent to the
3882 /// remote end, and must instead have been dropped. They will be regenerated when
3883 /// [`Self::monitor_updating_restored`] is called.
3885 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3886 /// [`chain::Watch`]: crate::chain::Watch
3887 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
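///
/// A rough sketch of the pause/restore pattern as used in this file (not a tested
/// doc-example):
///
/// ```ignore
/// self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
/// // ... hand the ChannelMonitorUpdate to the ChannelManager for persistence ...
/// // Once the update completes:
/// let updates = self.monitor_updating_restored(
///     &logger, &node_signer, chain_hash, &user_config, best_block_height);
/// ```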
3888 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3889 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3890 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3891 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3893 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3894 self.context.monitor_pending_commitment_signed |= resend_commitment;
3895 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3896 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3897 self.context.monitor_pending_failures.append(&mut pending_fails);
3898 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3899 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3902 /// Indicates that the latest ChannelMonitor update has been committed by the client
3903 /// successfully and we should restore normal operation. Returns messages which should be sent
3904 /// to the remote side.
3905 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3906 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3907 user_config: &UserConfig, best_block_height: u32
3908 ) -> MonitorRestoreUpdates
3909 where
3910 L::Target: Logger,
3911 NS::Target: NodeSigner
3913 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3914 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3916 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3917 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3918 // first received the funding_signed.
3919 let mut funding_broadcastable =
3920 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3921 self.context.funding_transaction.take()
3923 // That said, if the funding transaction is already confirmed (ie we're active with a
3924 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3925 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3926 funding_broadcastable = None;
3929 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3930 // (and we assume the user never directly broadcasts the funding transaction and waits for
3931 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3932 // * an inbound channel that failed to persist the monitor on funding_created and we got
3933 // the funding transaction confirmed before the monitor was persisted, or
3934 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
3935 let channel_ready = if self.context.monitor_pending_channel_ready {
3936 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3937 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3938 self.context.monitor_pending_channel_ready = false;
3939 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3940 Some(msgs::ChannelReady {
3941 channel_id: self.context.channel_id(),
3942 next_per_commitment_point,
3943 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3947 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
3949 let mut accepted_htlcs = Vec::new();
3950 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3951 let mut failed_htlcs = Vec::new();
3952 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3953 let mut finalized_claimed_htlcs = Vec::new();
3954 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
3956 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3957 self.context.monitor_pending_revoke_and_ack = false;
3958 self.context.monitor_pending_commitment_signed = false;
3959 return MonitorRestoreUpdates {
3960 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3961 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3965 let raa = if self.context.monitor_pending_revoke_and_ack {
3966 Some(self.get_last_revoke_and_ack())
3968 let commitment_update = if self.context.monitor_pending_commitment_signed {
3969 self.get_last_commitment_update_for_send(logger).ok()
3971 if commitment_update.is_some() {
3972 self.mark_awaiting_response();
3975 self.context.monitor_pending_revoke_and_ack = false;
3976 self.context.monitor_pending_commitment_signed = false;
3977 let order = self.context.resend_order.clone();
3978 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
3979 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
3980 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
3981 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
3982 MonitorRestoreUpdates {
3983 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3987 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
3988 where F::Target: FeeEstimator, L::Target: Logger
3990 if self.context.is_outbound() {
3991 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
3993 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3994 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
3996 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
3998 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
3999 self.context.update_time_counter += 1;
4000 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4001 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4002 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4003 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4004 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4005 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4006 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4007 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4008 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4009 msg.feerate_per_kw, holder_tx_dust_exposure)));
4011 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4012 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4013 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4019 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4020 /// blocked.
4022 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4023 let commitment_update = if self.context.signer_pending_commitment_update {
4024 self.get_last_commitment_update_for_send(logger).ok()
4026 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4027 self.context.get_funding_signed_msg(logger).1
4029 let channel_ready = if funding_signed.is_some() {
4030 self.check_get_channel_ready(0)
4032 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4033 self.context.get_funding_created_msg(logger)
4036 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4037 if commitment_update.is_some() { "a" } else { "no" },
4038 if funding_signed.is_some() { "a" } else { "no" },
4039 if funding_created.is_some() { "a" } else { "no" },
4040 if channel_ready.is_some() { "a" } else { "no" });
4042 SignerResumeUpdates {
4050 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
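// Per BOLT 2, a revoke_and_ack reveals the per-commitment secret of the state being
// revoked and supplies the per-commitment point for the next state. Commitment numbers
// count down, hence the `+ 2` below relative to our current commitment number.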
4051 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4052 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4053 msgs::RevokeAndACK {
4054 channel_id: self.context.channel_id,
4055 per_commitment_secret,
4056 next_per_commitment_point,
4058 next_local_nonce: None,
4062 /// Gets the last commitment update for immediate sending to our peer.
4063 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4064 let mut update_add_htlcs = Vec::new();
4065 let mut update_fulfill_htlcs = Vec::new();
4066 let mut update_fail_htlcs = Vec::new();
4067 let mut update_fail_malformed_htlcs = Vec::new();
4069 for htlc in self.context.pending_outbound_htlcs.iter() {
4070 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4071 update_add_htlcs.push(msgs::UpdateAddHTLC {
4072 channel_id: self.context.channel_id(),
4073 htlc_id: htlc.htlc_id,
4074 amount_msat: htlc.amount_msat,
4075 payment_hash: htlc.payment_hash,
4076 cltv_expiry: htlc.cltv_expiry,
4077 onion_routing_packet: (**onion_packet).clone(),
4078 skimmed_fee_msat: htlc.skimmed_fee_msat,
4083 for htlc in self.context.pending_inbound_htlcs.iter() {
4084 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4086 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4087 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4088 channel_id: self.context.channel_id(),
4089 htlc_id: htlc.htlc_id,
4090 reason: err_packet.clone()
4093 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4094 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4095 channel_id: self.context.channel_id(),
4096 htlc_id: htlc.htlc_id,
4097 sha256_of_onion: sha256_of_onion.clone(),
4098 failure_code: failure_code.clone(),
4101 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4102 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4103 channel_id: self.context.channel_id(),
4104 htlc_id: htlc.htlc_id,
4105 payment_preimage: payment_preimage.clone(),
4112 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4113 Some(msgs::UpdateFee {
4114 channel_id: self.context.channel_id(),
4115 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4119 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4120 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4121 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4122 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4123 if self.context.signer_pending_commitment_update {
4124 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4125 self.context.signer_pending_commitment_update = false;
4129 if !self.context.signer_pending_commitment_update {
4130 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4131 self.context.signer_pending_commitment_update = true;
4135 Ok(msgs::CommitmentUpdate {
4136 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4141 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4142 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4143 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4144 assert!(self.context.shutdown_scriptpubkey.is_some());
4145 Some(msgs::Shutdown {
4146 channel_id: self.context.channel_id,
4147 scriptpubkey: self.get_closing_scriptpubkey(),
4152 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4153 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4155 /// Some links printed in log lines are included here to check them during build (when run with
4156 /// `cargo doc --document-private-items`):
4157 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4158 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
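///
/// For reference, the BOLT 2 semantics relied upon below: the peer's
/// `next_local_commitment_number` is the number of the next `commitment_signed` it expects
/// to receive from us, `next_remote_commitment_number` is the number of the next
/// `revoke_and_ack` it expects, and `your_last_per_commitment_secret` proves how many of
/// our states it has seen us revoke.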
4159 pub fn channel_reestablish<L: Deref, NS: Deref>(
4160 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4161 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4162 ) -> Result<ReestablishResponses, ChannelError>
4163 where
4164 L::Target: Logger,
4165 NS::Target: NodeSigner
4167 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4168 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4169 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4170 // just close here instead of trying to recover.
4171 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4174 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4175 msg.next_local_commitment_number == 0 {
4176 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4179 if msg.next_remote_commitment_number > 0 {
4180 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4181 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4182 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4183 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4184 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4186 if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4187 macro_rules! log_and_panic {
4188 ($err_msg: expr) => {
4189 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4190 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4193 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4194 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4195 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4196 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4197 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4198 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4199 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4200 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4204 // Before we change the state of the channel, we check if the peer is sending a very old
4205 // commitment transaction number; if so, we send a warning message.
4206 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4207 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4209 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4213 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4214 // remaining cases either succeed or ErrorMessage-fail).
4215 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4216 self.context.sent_message_awaiting_response = None;
4218 let shutdown_msg = self.get_outbound_shutdown();
4220 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4222 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4223 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4224 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4225 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4226 if msg.next_remote_commitment_number != 0 {
4227 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4229 // Short circuit the whole handler as there is nothing we can resend them
4230 return Ok(ReestablishResponses {
4231 channel_ready: None,
4232 raa: None, commitment_update: None,
4233 order: RAACommitmentOrder::CommitmentFirst,
4234 shutdown_msg, announcement_sigs,
4238 // We have OurChannelReady set!
4239 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4240 return Ok(ReestablishResponses {
4241 channel_ready: Some(msgs::ChannelReady {
4242 channel_id: self.context.channel_id(),
4243 next_per_commitment_point,
4244 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4246 raa: None, commitment_update: None,
4247 order: RAACommitmentOrder::CommitmentFirst,
4248 shutdown_msg, announcement_sigs,
4252 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4253 // Remote isn't waiting on any RevokeAndACK from us!
4254 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4256 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4257 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4258 self.context.monitor_pending_revoke_and_ack = true;
4261 Some(self.get_last_revoke_and_ack())
4264 return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4267 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4268 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4269 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4270 // the corresponding revoke_and_ack back yet.
4271 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4272 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4273 self.mark_awaiting_response();
4275 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4277 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4278 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4279 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4280 Some(msgs::ChannelReady {
4281 channel_id: self.context.channel_id(),
4282 next_per_commitment_point,
4283 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4287 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4288 if required_revoke.is_some() {
4289 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4291 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4294 Ok(ReestablishResponses {
4295 channel_ready, shutdown_msg, announcement_sigs,
4296 raa: required_revoke,
4297 commitment_update: None,
4298 order: self.context.resend_order.clone(),
4300 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4301 if required_revoke.is_some() {
4302 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4304 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4307 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4308 self.context.monitor_pending_commitment_signed = true;
4309 Ok(ReestablishResponses {
4310 channel_ready, shutdown_msg, announcement_sigs,
4311 commitment_update: None, raa: None,
4312 order: self.context.resend_order.clone(),
4315 Ok(ReestablishResponses {
4316 channel_ready, shutdown_msg, announcement_sigs,
4317 raa: required_revoke,
4318 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4319 order: self.context.resend_order.clone(),
4323 Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4327 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4328 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4329 /// at which point they will be recalculated.
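///
/// For intuition (hypothetical numbers): with a closing transaction weight of 700 WU, a
/// minimum feerate of 253 sat/kW gives a floor of 253 * 700 / 1000 = 177 sats, while a
/// normal feerate of 2,000 sat/kW plus a 1,000 sat `force_close_avoidance_max_fee_satoshis`
/// gives a cap of 2,000 * 700 / 1000 + 1,000 = 2,400 sats.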
4330 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4332 where F::Target: FeeEstimator
4334 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4336 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4337 // feerate plus our force_close_avoidance_max_fee_satoshis.
4338 // If we fail to come to consensus, we'll have to force-close.
4339 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4340 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4341 // that we don't expect to need fee bumping
4342 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4343 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4345 // The spec requires that (when the channel does not have anchors) we only send absolute
4346 // channel fees no greater than the absolute channel fee on the current commitment
4347 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4348 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4349 // some force-closure by old nodes, but we wanted to close the channel anyway.
4351 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4352 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4353 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4354 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4357 // Note that technically we could end up with a lower minimum fee if one side's balance is
4358 // below our dust limit, causing the output to disappear. We don't bother handling this
4359 // case, however, as this should only happen if a channel is closed before any (material)
4360 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4361 // come to consensus with our counterparty on appropriate fees, however it should be a
4362 // relatively rare case. We can revisit this later, though note that in order to determine
4363 // if the funder's output is dust we have to know the absolute fee we're going to use.
4364 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4365 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4366 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4367 // We always add force_close_avoidance_max_fee_satoshis to our normal
4368 // feerate-calculated fee, but allow the max to be overridden if we're using a
4369 // target feerate-calculated fee.
4370 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4371 proposed_max_feerate as u64 * tx_weight / 1000)
4373 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4376 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4377 self.context.closing_fee_limits.clone().unwrap()
4380 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4381 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4382 /// this point if we're the funder we should send the initial closing_signed, and in any case
4383 /// shutdown should complete within a reasonable timeframe.
4384 fn closing_negotiation_ready(&self) -> bool {
4385 self.context.closing_negotiation_ready()
4388 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4389 /// an Err if no progress is being made and the channel should be force-closed instead.
4390 /// Should be called on a one-minute timer.
4391 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4392 if self.closing_negotiation_ready() {
4393 if self.context.closing_signed_in_flight {
4394 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4396 self.context.closing_signed_in_flight = true;
4402 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4403 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4404 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4405 where F::Target: FeeEstimator, L::Target: Logger
4407 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4408 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4409 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4410 // that closing_negotiation_ready checks this case (as well as a few others).
4411 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4412 return Ok((None, None, None));
4415 if !self.context.is_outbound() {
4416 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4417 return self.closing_signed(fee_estimator, &msg);
4419 return Ok((None, None, None));
4422 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4423 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4424 if self.context.expecting_peer_commitment_signed {
4425 return Ok((None, None, None));
4428 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4430 assert!(self.context.shutdown_scriptpubkey.is_some());
4431 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4432 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4433 our_min_fee, our_max_fee, total_fee_satoshis);
4435 match &self.context.holder_signer {
4436 ChannelSignerType::Ecdsa(ecdsa) => {
4438 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4439 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4441 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4442 Ok((Some(msgs::ClosingSigned {
4443 channel_id: self.context.channel_id,
4444 fee_satoshis: total_fee_satoshis,
4446 fee_range: Some(msgs::ClosingSignedFeeRange {
4447 min_fee_satoshis: our_min_fee,
4448 max_fee_satoshis: our_max_fee,
4452 // TODO (taproot|arik)
4458 // Marks a channel as waiting for a response from the counterparty. If it's not received
4459 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4460 // a reconnection.
4461 fn mark_awaiting_response(&mut self) {
4462 self.context.sent_message_awaiting_response = Some(0);
4465 /// Determines whether we should disconnect the counterparty due to not receiving a response
4466 /// within our expected timeframe.
4468 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4469 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4470 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4473 // Don't disconnect when we're not waiting on a response.
4476 *ticks_elapsed += 1;
4477 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4480 pub fn shutdown(
4481 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4482 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4484 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4485 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4487 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4488 // Spec says we should fail the connection, not the channel, but that's nonsense; there
4489 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4490 // can do that via error message without getting a connection fail anyway...
4491 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4493 for htlc in self.context.pending_inbound_htlcs.iter() {
4494 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4495 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4498 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
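// BOLT 2 limits shutdown scripts to P2PKH, P2SH, P2WPKH, and P2WSH, or, if the peer
// negotiated `option_shutdown_anysegwit`, any witness program of version 1 or higher with
// a 2-to-40-byte program push.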
4500 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4501 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4504 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4505 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4506 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4509 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4512 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4513 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4514 // any further commitment updates after we set LocalShutdownSent.
4515 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4517 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4520 assert!(send_shutdown);
4521 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4522 Ok(scriptpubkey) => scriptpubkey,
4523 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4525 if !shutdown_scriptpubkey.is_compatible(their_features) {
4526 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4528 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4533 // From here on out, we may not fail!
4535 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4536 self.context.update_time_counter += 1;
4538 let monitor_update = if update_shutdown_script {
4539 self.context.latest_monitor_update_id += 1;
4540 let monitor_update = ChannelMonitorUpdate {
4541 update_id: self.context.latest_monitor_update_id,
4542 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4543 scriptpubkey: self.get_closing_scriptpubkey(),
4546 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4547 self.push_ret_blockable_mon_update(monitor_update)
4549 let shutdown = if send_shutdown {
4550 Some(msgs::Shutdown {
4551 channel_id: self.context.channel_id,
4552 scriptpubkey: self.get_closing_scriptpubkey(),
4556 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4557 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4558 // cell HTLCs and return them to fail the payment.
4559 self.context.holding_cell_update_fee = None;
4560 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4561 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4563 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4564 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4571 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4572 self.context.update_time_counter += 1;
4574 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4577 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4578 let mut tx = closing_tx.trust().built_transaction().clone();
4580 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4582 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4583 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4584 let mut holder_sig = sig.serialize_der().to_vec();
4585 holder_sig.push(EcdsaSighashType::All as u8);
4586 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4587 cp_sig.push(EcdsaSighashType::All as u8);
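// BOLT 3 requires the two signatures in the witness to appear in the same order as the
// funding pubkeys in the redeemscript, which sorts them lexicographically, so we compare
// the serialized keys to pick which signature goes first.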
4588 if funding_key[..] < counterparty_funding_key[..] {
4589 tx.input[0].witness.push(holder_sig);
4590 tx.input[0].witness.push(cp_sig);
4592 tx.input[0].witness.push(cp_sig);
4593 tx.input[0].witness.push(holder_sig);
4596 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4600 pub fn closing_signed<F: Deref>(
4601 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4602 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4603 where F::Target: FeeEstimator
4605 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4606 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4608 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4609 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4611 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4612 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4614 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4615 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4618 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4619 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4622 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4623 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4624 return Ok((None, None, None));
4627 let funding_redeemscript = self.context.get_funding_redeemscript();
4628 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4629 if used_total_fee != msg.fee_satoshis {
4630 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4632 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4634 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4635 Ok(_) => {},
4636 Err(_) => {
4637 // The remote end may have decided to revoke their output due to inconsistent dust
4638 // limits, so check for that case by re-checking the signature here.
4639 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4640 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4641 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4642 },
4643 };
4645 for outp in closing_tx.trust().built_transaction().output.iter() {
4646 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4647 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4648 }
4649 }
4651 assert!(self.context.shutdown_scriptpubkey.is_some());
4652 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4653 if last_fee == msg.fee_satoshis {
4654 let shutdown_result = ShutdownResult {
4655 monitor_update: None,
4656 dropped_outbound_htlcs: Vec::new(),
4657 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4658 };
4659 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4660 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4661 self.context.update_time_counter += 1;
4662 return Ok((None, Some(tx), Some(shutdown_result)));
4663 }
4664 }
4666 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4668 macro_rules! propose_fee {
4669 ($new_fee: expr) => {
4670 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4671 (closing_tx, $new_fee)
4672 } else {
4673 self.build_closing_transaction($new_fee, false)
4674 };
4676 return match &self.context.holder_signer {
4677 ChannelSignerType::Ecdsa(ecdsa) => {
4678 let sig = ecdsa
4679 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4680 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4681 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4682 let shutdown_result = ShutdownResult {
4683 monitor_update: None,
4684 dropped_outbound_htlcs: Vec::new(),
4685 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4686 };
4687 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4688 self.context.update_time_counter += 1;
4689 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4690 (Some(tx), Some(shutdown_result))
4691 } else {
4692 (None, None)
4693 };
4695 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4696 Ok((Some(msgs::ClosingSigned {
4697 channel_id: self.context.channel_id,
4698 fee_satoshis: used_fee,
4699 signature: sig,
4700 fee_range: Some(msgs::ClosingSignedFeeRange {
4701 min_fee_satoshis: our_min_fee,
4702 max_fee_satoshis: our_max_fee,
4703 }),
4704 }), signed_tx, shutdown_result))
4705 },
4706 // TODO (taproot|arik)
4707 #[cfg(taproot)]
4708 _ => todo!()
4709 }
4710 }
4711 }
4713 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4714 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4715 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4716 }
4717 if max_fee_satoshis < our_min_fee {
4718 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4719 }
4720 if min_fee_satoshis > our_max_fee {
4721 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4722 }
4724 if !self.context.is_outbound() {
4725 // They have to pay, so pick the highest fee in the overlapping range.
4726 // We should never set an upper bound aside from their full balance
4727 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4728 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4729 } else {
4730 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4731 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4732 msg.fee_satoshis, our_min_fee, our_max_fee)));
4733 }
4734 // The proposed fee is in our acceptable range, accept it and broadcast!
4735 propose_fee!(msg.fee_satoshis);
4736 }
4737 } else {
4738 // Old fee style negotiation. We don't bother to check whether they are complying
4739 // with the "making progress" requirements; we just comply and hope for the best.
4740 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4741 if msg.fee_satoshis > last_fee {
4742 if msg.fee_satoshis < our_max_fee {
4743 propose_fee!(msg.fee_satoshis);
4744 } else if last_fee < our_max_fee {
4745 propose_fee!(our_max_fee);
4746 } else {
4747 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4748 }
4749 } else {
4750 if msg.fee_satoshis > our_min_fee {
4751 propose_fee!(msg.fee_satoshis);
4752 } else if last_fee > our_min_fee {
4753 propose_fee!(our_min_fee);
4754 } else {
4755 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4756 }
4757 }
4758 } else {
4759 if msg.fee_satoshis < our_min_fee {
4760 propose_fee!(our_min_fee);
4761 } else if msg.fee_satoshis > our_max_fee {
4762 propose_fee!(our_max_fee);
4763 } else {
4764 propose_fee!(msg.fee_satoshis);
4765 }
4766 }
4767 }
4768 }
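// Worked example (added for exposition; not part of the original source): under `fee_range`
// negotiation a mutual-close fee is acceptable iff it lies in the overlap of both sides'
// ranges. If our range is [500, 2000] sat and the funder proposes 1200 sat with range
// [1000, 3000] sat, the overlap is [1000, 2000] sat: as the non-funder we counter with the
// top of the overlap (2000 sat, since the funder pays), while as the funder we accept the
// in-range 1200 sat outright. A minimal sketch of the overlap computation:
//
//     fn overlap(ours: (u64, u64), theirs: (u64, u64)) -> Option<(u64, u64)> {
//         let (lo, hi) = (ours.0.max(theirs.0), ours.1.min(theirs.1));
//         if lo <= hi { Some((lo, hi)) } else { None } // no overlap => the Warn cases above
//     }
//     // overlap((500, 2000), (1000, 3000)) == Some((1000, 2000))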
4770 fn internal_htlc_satisfies_config(
4771 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4772 ) -> Result<(), (&'static str, u16)> {
4773 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4774 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4775 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4776 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4777 return Err((
4778 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4779 0x1000 | 12, // fee_insufficient
4780 ));
4781 }
4782 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4783 return Err((
4784 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4785 0x1000 | 13, // incorrect_cltv_expiry
4786 ));
4787 }
4788 Ok(())
4789 }
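// Worked example (added for exposition; not part of the original source): the expected
// forwarding fee is `base + amt * proportional / 1_000_000`, all in msat, mirroring the
// checked arithmetic above. With forwarding_fee_base_msat = 1_000,
// forwarding_fee_proportional_millionths = 100 and amt_to_forward = 1_000_000 msat, the fee
// is 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat, so the incoming HTLC must carry at
// least 1_001_100 msat:
//
//     fn expected_fee_msat(amt_to_forward: u64, base_msat: u32, prop_millionths: u32) -> Option<u64> {
//         amt_to_forward.checked_mul(prop_millionths as u64)
//             .and_then(|prop| (prop / 1_000_000).checked_add(base_msat as u64))
//     }
//     // expected_fee_msat(1_000_000, 1_000, 100) == Some(1_100)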
4791 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4792 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4793 /// unsuccessful, falls back to the previous one if one exists.
4794 pub fn htlc_satisfies_config(
4795 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4796 ) -> Result<(), (&'static str, u16)> {
4797 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4798 .or_else(|err| {
4799 if let Some(prev_config) = self.context.prev_config() {
4800 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4801 } else {
4802 Err(err)
4803 }
4804 })
4805 }
4807 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4808 self.context.cur_holder_commitment_transaction_number + 1
4809 }
4811 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4812 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4813 }
4815 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4816 self.context.cur_counterparty_commitment_transaction_number + 2
4817 }
4820 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4821 &self.context.holder_signer
4822 }
4824 #[cfg(test)]
4825 pub fn get_value_stat(&self) -> ChannelValueStat {
4826 ChannelValueStat {
4827 value_to_self_msat: self.context.value_to_self_msat,
4828 channel_value_msat: self.context.channel_value_satoshis * 1000,
4829 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4830 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4831 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4832 holding_cell_outbound_amount_msat: {
4833 let mut res = 0;
4834 for h in self.context.holding_cell_htlc_updates.iter() {
4835 match h {
4836 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4837 res += amount_msat;
4838 },
4839 _ => {}
4840 }
4841 }
4842 res
4843 },
4844 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4845 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4846 }
4847 }
4849 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4850 /// Allowed in any state (including after shutdown)
4851 pub fn is_awaiting_monitor_update(&self) -> bool {
4852 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4855 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4856 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4857 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4858 self.context.blocked_monitor_updates[0].update.update_id - 1
4861 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4862 /// further blocked monitor update exists after the next.
4863 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4864 if self.context.blocked_monitor_updates.is_empty() { return None; }
4865 Some((self.context.blocked_monitor_updates.remove(0).update,
4866 !self.context.blocked_monitor_updates.is_empty()))
4867 }
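// Usage sketch (added for exposition; not part of the original source): once whatever was
// blocking the queue completes, a caller can drain it in order, handing each update to its
// `chain::Watch` implementation (`persist_update` is a hypothetical helper):
//
//     while let Some((update, _more_blocked)) = chan.unblock_next_blocked_monitor_update() {
//         persist_update(update);
//     }
//
// The returned bool tells the caller whether further updates remain blocked behind this one.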
4869 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4870 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4871 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4872 -> Option<ChannelMonitorUpdate> {
4873 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4874 if !release_monitor {
4875 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4876 update,
4877 });
4878 None
4879 } else {
4880 Some(update)
4881 }
4882 }
4884 pub fn blocked_monitor_updates_pending(&self) -> usize {
4885 self.context.blocked_monitor_updates.len()
4886 }
4888 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4889 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4890 /// transaction. If the channel is inbound, this implies simply that the channel has not
4891 /// advanced state.
4892 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4893 if !self.is_awaiting_monitor_update() { return false; }
4894 if self.context.channel_state &
4895 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4896 == ChannelState::FundingSent as u32 {
4897 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4898 // FundingSent set, though our peer could have sent their channel_ready.
4899 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4900 return true;
4901 }
4902 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4903 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4904 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4905 // waiting for the initial monitor persistence. Thus, we check if our commitment
4906 // transaction numbers have both been iterated only exactly once (for the
4907 // funding_signed), and we're awaiting monitor update.
4909 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4910 // only way to get an awaiting-monitor-update state during initial funding is if the
4911 // initial monitor persistence is still pending).
4913 // Because deciding we're awaiting initial broadcast spuriously could result in
4914 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4915 // we hard-assert here, even in production builds.
4916 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4917 assert!(self.context.monitor_pending_channel_ready);
4918 assert_eq!(self.context.latest_monitor_update_id, 0);
4919 return true;
4920 }
4922 false
4923 }
4924 /// Returns true if our channel_ready has been sent
4925 pub fn is_our_channel_ready(&self) -> bool {
4926 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4929 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4930 pub fn received_shutdown(&self) -> bool {
4931 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4934 /// Returns true if we either initiated or agreed to shut down the channel.
4935 pub fn sent_shutdown(&self) -> bool {
4936 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4939 /// Returns true if this channel is fully shut down. True here implies that no further actions
4940 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4941 /// will be handled appropriately by the chain monitor.
4942 pub fn is_shutdown(&self) -> bool {
4943 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
4944 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
4945 true
4946 } else { false }
4947 }
4949 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
4950 self.context.channel_update_status
4953 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
4954 self.context.update_time_counter += 1;
4955 self.context.channel_update_status = status;
4958 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
4959 // Called:
4960 // * always when a new block/transactions are confirmed with the new height
4961 // * when funding is signed with a height of 0
4962 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
4963 return None;
4964 }
4966 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
4967 if funding_tx_confirmations <= 0 {
4968 self.context.funding_tx_confirmation_height = 0;
4969 }
4971 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
4972 return None;
4973 }
4975 // If we're still pending the signature on a funding transaction, then we're not ready to send a
4976 // channel_ready yet.
4977 if self.context.signer_pending_funding {
4978 return None;
4979 }
4981 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
4982 // channel_ready until the entire batch is ready.
4983 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
4984 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
4985 self.context.channel_state |= ChannelState::OurChannelReady as u32;
4986 true
4987 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
4988 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
4989 self.context.update_time_counter += 1;
4990 true
4991 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
4992 // We got a reorg but not enough to trigger a force close, just ignore.
4993 false
4994 } else {
4995 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
4996 // We should never see a funding transaction on-chain until we've received
4997 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
4998 // an inbound channel - before that we have no known funding TXID). The fuzzer,
4999 // however, may do this and we shouldn't treat it as a bug.
5000 #[cfg(not(fuzzing))]
5001 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5002 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5003 self.context.channel_state);
5004 }
5005 // We got a reorg but not enough to trigger a force close, just ignore.
5006 false
5007 };
5009 if need_commitment_update {
5010 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5011 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5012 let next_per_commitment_point =
5013 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5014 return Some(msgs::ChannelReady {
5015 channel_id: self.context.channel_id,
5016 next_per_commitment_point,
5017 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5018 });
5019 }
5020 } else {
5021 self.context.monitor_pending_channel_ready = true;
5022 }
5023 }
5025 None
5026 }
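// Worked example (added for exposition; not part of the original source): confirmations are
// counted inclusive of the block containing the funding transaction. If it confirmed at
// height 100 and the current height is 105, it has 105 - 100 + 1 = 6 confirmations, which
// satisfies a minimum_depth of 6:
//
//     let (conf_height, height, minimum_depth) = (100i64, 105i64, 6i64);
//     assert!(height - conf_height + 1 >= minimum_depth);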
5027 /// When a transaction is confirmed, we check whether it is, or spends, the funding transaction.
5028 /// In the first case, we store the confirmation height and calculate the short channel id.
5029 /// In the second, we simply return an Err indicating we need to be force-closed now.
5030 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5031 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5032 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5033 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5034 where
5035 NS::Target: NodeSigner,
5036 L::Target: Logger
5037 {
5038 let mut msgs = (None, None);
5039 if let Some(funding_txo) = self.context.get_funding_txo() {
5040 for &(index_in_block, tx) in txdata.iter() {
5041 // Check if the transaction is the expected funding transaction, and if it is,
5042 // check that it pays the right amount to the right script.
5043 if self.context.funding_tx_confirmation_height == 0 {
5044 if tx.txid() == funding_txo.txid {
5045 let txo_idx = funding_txo.index as usize;
5046 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5047 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5048 if self.context.is_outbound() {
5049 // If we generated the funding transaction and it doesn't match what it
5050 // should, the client is really broken and we should just panic and
5051 // tell them off. That said, because hash collisions happen with high
5052 // probability in fuzzing mode, if we're fuzzing we just close the
5053 // channel and move on.
5054 #[cfg(not(fuzzing))]
5055 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5057 self.context.update_time_counter += 1;
5058 let err_reason = "funding tx had wrong script/value or output index";
5059 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5060 } else {
5061 if self.context.is_outbound() {
5062 if !tx.is_coin_base() {
5063 for input in tx.input.iter() {
5064 if input.witness.is_empty() {
5065 // We generated a malleable funding transaction, implying we've
5066 // just exposed ourselves to funds loss to our counterparty.
5067 #[cfg(not(fuzzing))]
5068 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5073 self.context.funding_tx_confirmation_height = height;
5074 self.context.funding_tx_confirmed_in = Some(*block_hash);
5075 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5076 Ok(scid) => Some(scid),
5077 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5080 // If this is a coinbase transaction and not a 0-conf channel
5081 // we should update our min_depth to 100 to handle coinbase maturity
5082 if tx.is_coin_base() &&
5083 self.context.minimum_depth.unwrap_or(0) > 0 &&
5084 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5085 self.context.minimum_depth = Some(COINBASE_MATURITY);
5088 // If we allow 1-conf funding, we may need to check for channel_ready here and
5089 // send it immediately instead of waiting for a best_block_updated call (which
5090 // may have already happened for this block).
5091 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5092 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5093 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5094 msgs = (Some(channel_ready), announcement_sigs);
5097 for inp in tx.input.iter() {
5098 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5099 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5100 return Err(ClosureReason::CommitmentTxConfirmed);
5101 }
5102 }
5103 }
5104 }
5106 Ok(msgs)
5107 }
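// Illustrative sketch (added for exposition; not part of the original source): a short
// channel id packs the block height (3 bytes), the transaction index within the block
// (3 bytes) and the funding output index (2 bytes), which is why `scid_from_parts` above
// rejects heights over ~16 million, blocks with over ~16 million transactions, and output
// indices over 65k:
//
//     fn scid(block_height: u64, tx_index: u64, vout_index: u64) -> u64 {
//         (block_height << 40) | (tx_index << 16) | vout_index
//     }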
5108 /// When a new block is connected, we check the height of the block against outbound holding
5109 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5110 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5111 /// handled by the ChannelMonitor.
5113 /// If we return Err, the channel may have been closed, at which point the standard
5114 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5115 /// there.
5117 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5118 /// back.
5119 pub fn best_block_updated<NS: Deref, L: Deref>(
5120 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5121 node_signer: &NS, user_config: &UserConfig, logger: &L
5122 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5123 where
5124 NS::Target: NodeSigner,
5125 L::Target: Logger
5126 {
5127 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5130 fn do_best_block_updated<NS: Deref, L: Deref>(
5131 &mut self, height: u32, highest_header_time: u32,
5132 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5133 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5134 where
5135 NS::Target: NodeSigner,
5136 L::Target: Logger
5137 {
5138 let mut timed_out_htlcs = Vec::new();
5139 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5140 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5141 // ~now.
5142 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5143 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5144 match htlc_update {
5145 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5146 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5147 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5148 false
5149 } else { true }
5150 },
5151 _ => true,
5152 }
5153 });
5155 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5157 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5158 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5159 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5160 } else { None };
5161 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5162 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5163 }
5165 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5166 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5167 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5168 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5169 if self.context.funding_tx_confirmation_height == 0 {
5170 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5171 // zero if it has been reorged out, however in either case, our state flags
5172 // indicate we've already sent a channel_ready
5173 funding_tx_confirmations = 0;
5176 // If we've sent channel_ready (or have both sent and received channel_ready), and
5177 // the funding transaction has become unconfirmed,
5178 // close the channel and hope we can get the latest state on chain (because presumably
5179 // the funding transaction is at least still in the mempool of most nodes).
5181 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5182 // 0-conf channel, but not doing so may lead to the
5183 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5184 // to.
5185 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5186 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5187 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5188 return Err(ClosureReason::ProcessingError { err: err_reason });
5190 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5191 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5192 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5193 // If funding_tx_confirmed_in is unset, the channel must not be active
5194 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5195 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5196 return Err(ClosureReason::FundingTimedOut);
5199 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5200 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5201 } else { None };
5202 Ok((None, timed_out_htlcs, announcement_sigs))
5203 }
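// Worked example (added for exposition; not part of the original source): the holding-cell
// timeout above uses the same bound as the forwarding check. With a best-block height of
// 800_000 and a grace period of, say, 3 blocks (the actual value is
// LATENCY_GRACE_PERIOD_BLOCKS), any held HTLC with cltv_expiry <= 800_003 is failed back
// rather than forwarded, since the counterparty would almost certainly reject it as
// expiring too soon:
//
//     let unforwarded_htlc_cltv_limit = 800_000u32 + 3; // height + grace period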
5205 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5206 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5207 /// before the channel has reached channel_ready and we can just wait for more blocks.
5208 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5209 if self.context.funding_tx_confirmation_height != 0 {
5210 // We handle the funding disconnection by calling best_block_updated with a height one
5211 // below where our funding was connected, implying a reorg back to conf_height - 1.
5212 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5213 // We use the time field to bump the current time we set on channel updates if it's
5214 // larger. If we don't know that time has moved forward, we can just set it to the last
5215 // time we saw and it will be ignored.
5216 let best_time = self.context.update_time_counter;
5217 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5218 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5219 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5220 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5221 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5222 Ok(())
5223 },
5224 Err(e) => Err(e)
5225 }
5226 } else {
5227 // We never learned about the funding confirmation anyway, just ignore
5228 Ok(())
5229 }
5230 }
5232 // Methods to get unprompted messages to send to the remote end (or where we already returned
5233 // something in the handler for the message that prompted this message):
5235 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5236 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5237 /// directions). Should be used for both broadcasted announcements and in response to an
5238 /// AnnouncementSignatures message from the remote peer.
5240 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5241 /// closing).
5243 /// This will only return ChannelError::Ignore upon failure.
5245 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5246 fn get_channel_announcement<NS: Deref>(
5247 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5248 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5249 if !self.context.config.announced_channel {
5250 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5251 }
5252 if !self.context.is_usable() {
5253 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5254 }
5256 let short_channel_id = self.context.get_short_channel_id()
5257 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5258 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5259 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5260 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5261 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5263 let msg = msgs::UnsignedChannelAnnouncement {
5264 features: channelmanager::provided_channel_features(&user_config),
5265 chain_hash,
5266 short_channel_id,
5267 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5268 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5269 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5270 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5271 excess_data: Vec::new(),
5272 };
5274 Ok(msg)
5275 }
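// Illustrative note (added for exposition; not part of the original source): BOLT 7 orders
// the announcement fields by the numerically-lesser node id so both peers independently
// construct identical contents to sign. E.g. a node id starting 0x02… sorts before one
// starting 0x03…, making it node_id_1 and pairing bitcoin_key_1 with its funding key:
//
//     fn order_ids(a: [u8; 33], b: [u8; 33]) -> ([u8; 33], [u8; 33]) {
//         if a < b { (a, b) } else { (b, a) } // (node_id_1, node_id_2)
//     }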
5277 fn get_announcement_sigs<NS: Deref, L: Deref>(
5278 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5279 best_block_height: u32, logger: &L
5280 ) -> Option<msgs::AnnouncementSignatures>
5282 NS::Target: NodeSigner,
5285 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5286 return None;
5287 }
5289 if !self.context.is_usable() {
5290 return None;
5291 }
5293 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5294 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5295 return None;
5296 }
5298 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5299 return None;
5300 }
5302 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5303 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5304 Ok(a) => a,
5305 Err(e) => {
5306 log_trace!(logger, "{:?}", e);
5307 return None;
5308 }
5309 };
5310 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5311 Err(_) => {
5312 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5313 return None;
5314 },
5315 Ok(v) => v
5316 };
5317 match &self.context.holder_signer {
5318 ChannelSignerType::Ecdsa(ecdsa) => {
5319 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5320 Err(_) => {
5321 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5322 return None;
5323 },
5324 Ok(v) => v
5325 };
5326 let short_channel_id = match self.context.get_short_channel_id() {
5327 Some(scid) => scid,
5328 None => return None,
5329 };
5331 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5333 Some(msgs::AnnouncementSignatures {
5334 channel_id: self.context.channel_id(),
5335 short_channel_id,
5336 node_signature: our_node_sig,
5337 bitcoin_signature: our_bitcoin_sig,
5338 })
5339 },
5340 // TODO (taproot|arik)
5341 #[cfg(taproot)]
5342 _ => todo!()
5343 }
5344 }
5346 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5347 /// available.
5348 fn sign_channel_announcement<NS: Deref>(
5349 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5350 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5351 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5352 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5353 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5354 let were_node_one = announcement.node_id_1 == our_node_key;
5356 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5357 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5358 match &self.context.holder_signer {
5359 ChannelSignerType::Ecdsa(ecdsa) => {
5360 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5361 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5362 Ok(msgs::ChannelAnnouncement {
5363 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5364 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5365 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5366 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5367 contents: announcement,
5368 })
5369 },
5370 // TODO (taproot|arik)
5371 #[cfg(taproot)]
5372 _ => todo!()
5373 }
5374 } else {
5375 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5376 }
5377 }
5379 /// Processes an incoming announcement_signatures message, providing a fully-signed
5380 /// channel_announcement message which we can broadcast and storing our counterparty's
5381 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5382 pub fn announcement_signatures<NS: Deref>(
5383 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5384 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5385 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5386 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5388 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5390 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5391 return Err(ChannelError::Close(format!(
5392 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5393 &announcement, self.context.get_counterparty_node_id())));
5395 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5396 return Err(ChannelError::Close(format!(
5397 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5398 &announcement, self.context.counterparty_funding_pubkey())));
5401 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5402 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5403 return Err(ChannelError::Ignore(
5404 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5407 self.sign_channel_announcement(node_signer, announcement)
5410 /// Gets a signed channel_announcement for this channel, if we previously received an
5411 /// announcement_signatures from our counterparty.
5412 pub fn get_signed_channel_announcement<NS: Deref>(
5413 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5414 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5415 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5416 return None;
5417 }
5418 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5419 Ok(res) => res,
5420 Err(_) => return None,
5421 };
5422 match self.sign_channel_announcement(node_signer, announcement) {
5423 Ok(res) => Some(res),
5424 Err(_) => None,
5425 }
5426 }
5428 /// May panic if called on a channel that wasn't immediately-previously
5429 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5430 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5431 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5432 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5433 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5434 // current to_remote balances. However, it no longer has any use, and thus is now simply
5435 // set to a dummy (but valid, as required by the spec) public key.
5436 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5437 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5438 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5439 let mut pk = [2; 33]; pk[1] = 0xff;
5440 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5441 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5442 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5443 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5444 remote_last_secret
5445 } else {
5446 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5447 [0;32]
5448 };
5449 self.mark_awaiting_response();
5450 msgs::ChannelReestablish {
5451 channel_id: self.context.channel_id(),
5452 // The protocol has two different commitment number concepts - the "commitment
5453 // transaction number", which starts from 0 and counts up, and the "revocation key
5454 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5455 // commitment transaction numbers by the index which will be used to reveal the
5456 // revocation key for that commitment transaction, which means we have to convert them
5457 // to protocol-level commitment numbers here...
5459 // next_local_commitment_number is the next commitment_signed number we expect to
5460 // receive (indicating if they need to resend one that we missed).
5461 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5462 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5463 // receive, however we track it by the next commitment number for a remote transaction
5464 // (which is one further, as they always revoke previous commitment transaction, not
5465 // the one we send) so we have to decrement by 1. Note that if
5466 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5467 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5468 // underflow here.
5469 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5470 your_last_per_commitment_secret: remote_last_secret,
5471 my_current_per_commitment_point: dummy_pubkey,
5472 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5473 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5474 // txid of that interactive transaction, else we MUST NOT set it.
5475 next_funding_txid: None,
5476 }
5477 }
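// Worked example (added for exposition; not part of the original source): internal
// commitment indices count down from INITIAL_COMMITMENT_NUMBER (2^48 - 1) while wire-level
// commitment numbers count up from 0, so the conversion above is a simple subtraction.
// After exactly one holder commitment, for instance:
//
//     const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
//     let cur_holder_commitment_transaction_number = INITIAL_COMMITMENT_NUMBER - 1;
//     assert_eq!(INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_transaction_number, 1);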
5480 // Send stuff to our remote peers:
5482 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5483 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5484 /// commitment update.
5486 /// `Err`s will only be [`ChannelError::Ignore`].
5487 pub fn queue_add_htlc<F: Deref, L: Deref>(
5488 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5489 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5490 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5491 ) -> Result<(), ChannelError>
5492 where F::Target: FeeEstimator, L::Target: Logger
5493 {
5494 self
5495 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5496 skimmed_fee_msat, fee_estimator, logger)
5497 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5498 .map_err(|err| {
5499 if let ChannelError::Ignore(_) = err { /* fine */ }
5500 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5501 err
5502 })
5503 }
5505 /// Adds a pending outbound HTLC to this channel; note that you probably want
5506 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
5507 ///
5508 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5509 /// the wire:
5510 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5511 ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5512 ///   in flight.
5513 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5514 /// we may not yet have sent the previous commitment update messages and will need to
5515 /// regenerate them.
5517 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5518 /// on this [`Channel`] if `force_holding_cell` is false.
5520 /// `Err`s will only be [`ChannelError::Ignore`].
5521 fn send_htlc<F: Deref, L: Deref>(
5522 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5523 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5524 skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5525 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5526 where F::Target: FeeEstimator, L::Target: Logger
5528 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5529 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5531 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5532 if amount_msat > channel_total_msat {
5533 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5536 if amount_msat == 0 {
5537 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5540 let available_balances = self.context.get_available_balances(fee_estimator);
5541 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5542 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5543 available_balances.next_outbound_htlc_minimum_msat)));
5546 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5547 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5548 available_balances.next_outbound_htlc_limit_msat)));
5551 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5552 // Note that this should never really happen: if we're !is_live() on receipt of an
5553 // incoming HTLC for relay, we will reject the HTLC, and we won't allow the user to
5554 // send directly into a !is_live() channel. However, if we
5555 // disconnected during the time the previous hop was doing the commitment dance we may
5556 // end up getting here after the forwarding delay. In any case, returning an
5557 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5558 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5561 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5562 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5563 payment_hash, amount_msat,
5564 if force_holding_cell { "into holding cell" }
5565 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5566 else { "to peer" });
5568 if need_holding_cell {
5569 force_holding_cell = true;
5572 // Now update local state:
5573 if force_holding_cell {
5574 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5575 amount_msat,
5576 payment_hash,
5577 cltv_expiry,
5578 source,
5579 onion_routing_packet,
5580 skimmed_fee_msat,
5581 });
5582 return Ok(None);
5583 }
5585 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5586 htlc_id: self.context.next_holder_htlc_id,
5587 amount_msat,
5588 payment_hash: payment_hash.clone(),
5589 cltv_expiry,
5590 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5591 source,
5592 skimmed_fee_msat,
5593 });
5595 let res = msgs::UpdateAddHTLC {
5596 channel_id: self.context.channel_id,
5597 htlc_id: self.context.next_holder_htlc_id,
5598 amount_msat,
5599 payment_hash,
5600 cltv_expiry,
5601 onion_routing_packet,
5602 skimmed_fee_msat,
5603 };
5604 self.context.next_holder_htlc_id += 1;
5606 Ok(Some(res))
5607 }
5609 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5610 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5611 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5612 // fail to generate this, we still are at least at a position where upgrading their status
5613 // is acceptable.
5614 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5615 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5616 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5617 } else { None };
5618 if let Some(state) = new_state {
5619 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5620 htlc.state = state;
5621 }
5622 }
5623 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5624 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5625 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5626 // Grab the preimage, if it exists, instead of cloning
5627 let mut reason = OutboundHTLCOutcome::Success(None);
5628 mem::swap(outcome, &mut reason);
5629 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5632 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5633 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5634 debug_assert!(!self.context.is_outbound());
5635 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5636 self.context.feerate_per_kw = feerate;
5637 self.context.pending_update_fee = None;
5640 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5642 let (mut htlcs_ref, counterparty_commitment_tx) =
5643 self.build_commitment_no_state_update(logger);
5644 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5645 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5646 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5648 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5649 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5652 self.context.latest_monitor_update_id += 1;
5653 let monitor_update = ChannelMonitorUpdate {
5654 update_id: self.context.latest_monitor_update_id,
5655 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5656 commitment_txid: counterparty_commitment_txid,
5657 htlc_outputs: htlcs.clone(),
5658 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5659 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5660 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5661 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5662 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5663 }],
5664 };
5665 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5666 monitor_update
5667 }
5669 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5670 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5671 where L::Target: Logger
5673 let counterparty_keys = self.context.build_remote_transaction_keys();
5674 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5675 let counterparty_commitment_tx = commitment_stats.tx;
5677 #[cfg(any(test, fuzzing))]
5679 if !self.context.is_outbound() {
5680 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5681 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5682 if let Some(info) = projected_commit_tx_info {
5683 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5684 if info.total_pending_htlcs == total_pending_htlcs
5685 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5686 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5687 && info.feerate == self.context.feerate_per_kw {
5688 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5689 assert_eq!(actual_fee, info.fee);
5695 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5698 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5699 /// generation when we shouldn't change HTLC/channel state.
5700 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5701 // Get the fee tests from `build_commitment_no_state_update`
5702 #[cfg(any(test, fuzzing))]
5703 self.build_commitment_no_state_update(logger);
5705 let counterparty_keys = self.context.build_remote_transaction_keys();
5706 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5707 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5709 match &self.context.holder_signer {
5710 ChannelSignerType::Ecdsa(ecdsa) => {
5711 let (signature, htlc_signatures);
5713 {
5714 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5715 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5716 htlcs.push(htlc);
5717 }
5719 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5720 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5721 signature = res.0;
5722 htlc_signatures = res.1;
5724 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5725 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5726 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5727 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5729 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5730 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5731 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5732 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5733 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5734 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5738 Ok((msgs::CommitmentSigned {
5739 channel_id: self.context.channel_id,
5740 signature,
5741 htlc_signatures,
5742 #[cfg(taproot)]
5743 partial_signature_with_nonce: None,
5744 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5746 // TODO (taproot|arik)
5747 #[cfg(taproot)]
5748 _ => todo!()
5749 }
5750 }
5752 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5753 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5755 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5756 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5757 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5758 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5759 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5760 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5761 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5762 where F::Target: FeeEstimator, L::Target: Logger
5764 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5765 onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
5766 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5767 match send_res? {
5768 Some(_) => {
5769 let monitor_update = self.build_commitment_no_status_check(logger);
5770 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5771 Ok(self.push_ret_blockable_mon_update(monitor_update))
5772 },
5773 None => Ok(None)
5774 }
5775 }
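// Usage sketch (added for exposition; not part of the original source): callers typically
// hand the returned ChannelMonitorUpdate, if any, to their chain::Watch before sending the
// commitment_signed generated for this HTLC. With hypothetical surrounding variables:
//
//     if let Some(monitor_update) = chan.send_htlc_and_commit(
//         amt_msat, payment_hash, cltv_expiry, htlc_source, onion_packet,
//         None /* skimmed_fee_msat */, &fee_estimator, &logger,
//     )? {
//         chain_monitor.update_channel(funding_txo, &monitor_update);
//     }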
5777 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5778 /// happened.
5779 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5780 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5781 fee_base_msat: msg.contents.fee_base_msat,
5782 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5783 cltv_expiry_delta: msg.contents.cltv_expiry_delta
5785 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5786 if did_change {
5787 self.context.counterparty_forwarding_info = new_forwarding_info;
5788 }
5790 Ok(did_change)
5791 }
5793 /// Begins the shutdown process, getting a message for the remote peer and returning all
5794 /// holding cell HTLCs for payment failure.
5796 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
5797 /// [`ChannelMonitorUpdate`] will be returned.
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
			if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
				return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
			}
			else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
				return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
			}
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
		// script is set, we just force-close and call it a day.
		let mut chan_closed = false;
		if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			chan_closed = true;
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None if !chan_closed => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
			None => false,
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
			let shutdown_result = ShutdownResult {
				monitor_update: None,
				dropped_outbound_htlcs: Vec::new(),
				unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
			};
			self.context.channel_state = ChannelState::ShutdownComplete as u32;
			Some(shutdown_result)
		} else {
			self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
			None
		};
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}

impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
	pub fn new<ES: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
	) -> Result<OutboundV1Channel<SP>, APIError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator
	{
		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
		}
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place; this should never happen because of
			// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_type = Self::get_initial_channel_type(&config, their_features);
		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

		let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		} else {
			(ConfirmationTarget::NonAnchorChannelFee, 0)
		};
		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}
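		// (Illustrative arithmetic for the affordability check above; an assumption on our part,
		// using LDK's usual non-anchor weight constants of 724 weight units base plus 172 per
		// HTLC: at 2_500 sat/kW with MIN_AFFORDABLE_HTLC_COUNT = 4, the commitment fee is
		// (724 + 4 * 172) * 2_500 / 1_000 = 3_530 sats, which the funder's balance, net of any
		// anchor output value, must cover.)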

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
		};

		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
		Ok(Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

				channel_id: temporary_channel_id,
				temporary_channel_id: Some(temporary_channel_id),
				channel_state: ChannelState::OurInitSent as u32,
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: commitment_feerate,
				counterparty_dust_limit_satoshis: 0,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: 0,
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: 0,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: 0,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth: None, // Filled in in accept_channel

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: true,
					counterparty_parameters: None,
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: None,
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey: None,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
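	///
	/// Hedged sketch of the intended call order (the bindings are assumptions):
	///
	/// ```ignore
	/// let (chan, funding_created_opt) = outbound_chan
	/// 	.get_funding_created(funding_tx, funding_txo, /* is_batch_funding */ false, &logger)
	/// 	.map_err(|(_chan, err)| err)?;
	/// // Send `funding_created_opt` (if `Some`) to the peer; only broadcast the funding
	/// // transaction once the peer's `funding_signed` has been handled successfully.
	/// ```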
	pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingCreated as u32;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
				self.context.minimum_depth.unwrap_or(0) > 0 &&
				self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let funding_created = self.context.get_funding_created_msg(logger);
		if funding_created.is_none() {
			if !self.context.signer_pending_funding {
				log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
				self.context.signer_pending_funding = true;
			}
		}

		let channel = Channel {
			context: self.context,
		};

		Ok((channel, funding_created))
	}
	fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
		// The default channel type (ie the first one we try) depends on whether the channel is
		// public - if it is, we just go with `only_static_remotekey` as it's the only option
		// available. If it's private, we first try `scid_privacy` as it provides better privacy
		// with no other changes, and fall back to `only_static_remotekey`.
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel &&
			config.channel_handshake_config.negotiate_scid_privacy &&
			their_features.supports_scid_privacy() {
			ret.set_scid_privacy_required();
		}

		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
		// set it now. If they don't understand it, we'll fall back to our default of
		// `only_static_remotekey`.
		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
			their_features.supports_anchors_zero_fee_htlc_tx() {
			ret.set_anchors_zero_fee_htlc_tx_required();
		}

		ret
	}
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
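	///
	/// Hedged sketch of the retry flow (bindings are assumptions):
	///
	/// ```ignore
	/// // On receiving `error` for an outbound channel still awaiting accept_channel:
	/// match chan.maybe_handle_error_without_close(chain_hash, &fee_est) {
	/// 	Ok(open_channel_msg) => { /* re-send open_channel with a downgraded channel type */ },
	/// 	Err(()) => { /* no fallback channel type left; fail the channel */ },
	/// }
	/// ```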
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted one.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// some reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
		if !self.context.is_outbound() {
			panic!("Tried to open a channel for an inbound channel?");
		}
		if self.context.channel_state != ChannelState::OurInitSent as u32 {
			panic!("Cannot generate an open_channel after we've moved forward");
		}

		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an open_channel for a channel that has already advanced");
		}

		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::OpenChannel {
			chain_hash,
			temporary_channel_id: self.context.channel_id,
			funding_satoshis: self.context.channel_value_satoshis,
			push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			feerate_per_kw: self.context.feerate_per_kw as u32,
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			channel_flags: if self.context.config.announced_channel {1} else {0},
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
		}
	}
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if self.context.channel_state != ChannelState::OurInitSent as u32 {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
}
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}

impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
	pub fn new<ES: Deref, F: Deref, L: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
		current_chain_height: u32, logger: &L, is_0conf: bool,
	) -> Result<InboundV1Channel<SP>, ChannelError>
		where ES::Target: EntropySource,
			F::Target: FeeEstimator,
			L::Target: Logger,
	{
		let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };

		// First check the channel type is known, failing before we do anything else if we don't
		// support this channel type.
		let channel_type = if let Some(channel_type) = &msg.channel_type {
			if channel_type.supports_any_optional_bits() {
				return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
			}

			// We only support the channel types defined by the `ChannelManager` in
			// `provided_channel_type_features`. The channel type must always support
			// `static_remote_key`.
			if !channel_type.requires_static_remote_key() {
				return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
			}
			// Make sure we support all of the features behind the channel type.
			if !channel_type.is_subset(our_supported_features) {
				return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
			}
			if channel_type.requires_scid_privacy() && announced_channel {
				return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
			}
			channel_type.clone()
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			channel_type
		};

		let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}

		// Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}

		// Convert things into internal flags and prep our state:

		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place; this should never happen because of
			// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}

		// Check if the funder's amount for the initial commitment tx is sufficient
		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
		let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
		} else {
			0
		};

		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
		if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
		}

		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}
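		// (A rough numeric illustration of the two checks above, on our assumptions of the
		// default ~1% reserve and a non-anchor channel: funding_satoshis = 100_000 with
		// push_msat = 0 leaves the funder 100_000 sats; after a commitment fee of, say,
		// 1_412 sats, to_remote is 98_588 sats, comfortably above a 1_000 sat reserve.)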

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
		};

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let minimum_depth = if is_0conf {
			Some(0)
		} else {
			Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
		};

		let chan = Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: None,

				temporary_channel_id: Some(msg.temporary_channel_id),
				channel_id: msg.temporary_channel_id,
				channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat: msg.push_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: msg.feerate_per_kw,
				channel_value_satoshis: msg.funding_satoshis,
				counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth,

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: false,
					counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
						selected_contest_delay: msg.to_self_delay,
						pubkeys: counterparty_pubkeys,
					}),
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias: 0,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		};

		Ok(chan)
	}
	/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
	/// should be sent back to the counterparty node.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
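	///
	/// Hedged sketch (bindings are assumptions):
	///
	/// ```ignore
	/// let accept_msg = inbound_chan.accept_inbound_channel();
	/// // Queue `accept_msg` for delivery back to the counterparty.
	/// ```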
	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
		if self.context.is_outbound() {
			panic!("Tried to send accept_channel for an outbound channel?");
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
			panic!("Tried to send accept_channel after channel had moved forward");
		}
		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an accept_channel for a channel that has already advanced");
		}

		self.generate_accept_channel_message()
	}
	/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
	/// inbound channel. If the intention is to accept an inbound channel, use
	/// [`InboundV1Channel::accept_inbound_channel`] instead.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::AcceptChannel {
			temporary_channel_id: self.context.channel_id,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			minimum_depth: self.context.minimum_depth.unwrap(),
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
	/// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
	/// inbound channel without accepting it.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	#[cfg(test)]
	pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
		self.generate_accept_channel_message()
	}
	fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
		let trusted_tx = initial_commitment_tx.trust();
		let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
		let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
		// They sign the holder commitment transaction...
		log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
			log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
			encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
			encode::serialize_hex(&funding_script), &self.context.channel_id());
		secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());

		Ok(initial_commitment_tx)
	}
	pub fn funding_created<L: Deref>(
		mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
	where
		L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
		}
		if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
			// BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
			// remember the channel, so it's safe to just send an error_message here and drop the
			// channel.
			return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		// This is an externally observable change before we finish all our checks. In particular
		// check_funding_created_signature may fail.
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
			Ok(res) => res,
			Err(ChannelError::Close(e)) => {
				self.context.channel_transaction_parameters.funding_outpoint = None;
				return Err((self, ChannelError::Close(e)));
			},
			Err(e) => {
				// The only error we know how to handle is ChannelError::Close, so we fall over here
				// to make sure we don't continue with an inconsistent state.
				panic!("unexpected error type from check_funding_created_signature {:?}", e);
			}
		};

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingSent as u32;
		self.context.channel_id = funding_txo.to_channel_id();
		self.context.cur_counterparty_commitment_transaction_number -= 1;
		self.context.cur_holder_commitment_transaction_number -= 1;

		let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script.clone()),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);

		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number + 1,
			self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		log_info!(logger, "{} funding_signed for peer for channel {}",
			if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());

		// Promote the channel to a full-fledged one now that we have updated the state and have a
		// `ChannelMonitor`.
		let mut channel = Channel {
			context: self.context,
		};
		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());

		Ok((channel, funding_signed, channel_monitor))
	}
}
const SERIALIZATION_VERSION: u8 = 3;
const MIN_SERIALIZATION_VERSION: u8 = 3;

impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
	(0, FailRelay),
	(1, FailMalformed),
	(2, Fulfill),
);
impl Writeable for ChannelUpdateStatus {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// We only care about writing out the current state as it was announced, ie only either
		// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
		// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
		match self {
			ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
			ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
			ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
			ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
		}
		Ok(())
	}
}
6969 impl Readable for ChannelUpdateStatus {
6970 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6971 Ok(match <u8 as Readable>::read(reader)? {
6972 0 => ChannelUpdateStatus::Enabled,
6973 1 => ChannelUpdateStatus::Disabled,
6974 _ => return Err(DecodeError::InvalidValue),
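// Illustrative round trip of the Writeable impl above combined with this read
// mapping: staged states intentionally collapse to their last-announced value:
//   Enabled           -> 0 -> Enabled
//   DisabledStaged(_) -> 0 -> Enabled
//   EnabledStaged(_)  -> 1 -> Disabled
//   Disabled          -> 1 -> Disabled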
6979 impl Writeable for AnnouncementSigsState {
6980 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6981 // We only care about writing out the current state as if we had just disconnected, at
6982 // which point we always set anything but `PeerReceived` to `NotSent`.
6984 AnnouncementSigsState::NotSent => 0u8.write(writer),
6985 AnnouncementSigsState::MessageSent => 0u8.write(writer),
6986 AnnouncementSigsState::Committed => 0u8.write(writer),
6987 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
6992 impl Readable for AnnouncementSigsState {
6993 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
6994 Ok(match <u8 as Readable>::read(reader)? {
6995 0 => AnnouncementSigsState::NotSent,
6996 1 => AnnouncementSigsState::PeerReceived,
6997 _ => return Err(DecodeError::InvalidValue),
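// As with ChannelUpdateStatus, a write/read round trip deliberately forgets the
// in-flight states:
//   NotSent | MessageSent | Committed -> 0 -> NotSent
//   PeerReceived                      -> 1 -> PeerReceived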
7002 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7003 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7004 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7005 // called but include holding cell updates (and obviously we don't modify self).
7007 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7009 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7010 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7011 // the low bytes now and the optional high bytes later.
7012 let user_id_low = self.context.user_id as u64;
7013 user_id_low.write(writer)?;
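// A quick sketch of why this split is lossless (hypothetical value, purely
// illustrative; the read side recombines the halves the same way):
//   let user_id: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
//   let low = user_id as u64;          // 0x0011_2233_4455_6677
//   let high = (user_id >> 64) as u64; // 0x0123_4567_89ab_cdef
//   assert_eq!(((high as u128) << 64) | low as u128, user_id);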
7015 // Version 1 deserializers expected to read parts of the config object here. Version 2
7016 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7017 // `minimum_depth` we simply write dummy values here.
7018 writer.write_all(&[0; 8])?;
7020 self.context.channel_id.write(writer)?;
7021 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7022 self.context.channel_value_satoshis.write(writer)?;
7024 self.context.latest_monitor_update_id.write(writer)?;
7026 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7027 // deserialized from that format.
7028 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7029 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7030 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7032 self.context.destination_script.write(writer)?;
7034 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7035 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7036 self.context.value_to_self_msat.write(writer)?;
7038 let mut dropped_inbound_htlcs = 0;
7039 for htlc in self.context.pending_inbound_htlcs.iter() {
7040 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7041 dropped_inbound_htlcs += 1;
7044 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
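// HTLCs still in `RemoteAnnounced` were never committed to in a
// `commitment_signed`; since we serialize as if we had just disconnected, the
// peer will re-announce them on reconnect, so we skip them here and shrink
// `next_counterparty_htlc_id` to match further below.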
7045 for htlc in self.context.pending_inbound_htlcs.iter() {
7046 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7049 htlc.htlc_id.write(writer)?;
7050 htlc.amount_msat.write(writer)?;
7051 htlc.cltv_expiry.write(writer)?;
7052 htlc.payment_hash.write(writer)?;
7054 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7055 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7057 htlc_state.write(writer)?;
7059 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7061 htlc_state.write(writer)?;
7063 &InboundHTLCState::Committed => {
7066 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7068 removal_reason.write(writer)?;
7073 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7074 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7076 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7077 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7078 htlc.htlc_id.write(writer)?;
7079 htlc.amount_msat.write(writer)?;
7080 htlc.cltv_expiry.write(writer)?;
7081 htlc.payment_hash.write(writer)?;
7082 htlc.source.write(writer)?;
7084 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7086 onion_packet.write(writer)?;
7088 &OutboundHTLCState::Committed => {
7091 &OutboundHTLCState::RemoteRemoved(_) => {
7092 // Treat this as a Committed because we haven't received the CS - they'll
7093 // resend the claim/fail on reconnect, at which point we will (hopefully) get the missing CS.
7096 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7098 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7099 preimages.push(preimage);
7101 let reason: Option<&HTLCFailReason> = outcome.into();
7102 reason.write(writer)?;
7104 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7106 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7107 preimages.push(preimage);
7109 let reason: Option<&HTLCFailReason> = outcome.into();
7110 reason.write(writer)?;
7113 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7114 if pending_outbound_skimmed_fees.is_empty() {
7115 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7117 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7118 } else if !pending_outbound_skimmed_fees.is_empty() {
7119 pending_outbound_skimmed_fees.push(None);
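// Invariant of the skimmed-fee accumulation above: the vec stays empty until
// the first HTLC that actually has a skimmed fee, after which it holds exactly
// one entry per HTLC visited, keeping indices aligned for the read side. For
// example (illustrative), skimmed fees of [None, Some(1), None] across three
// HTLCs serialize as [None, Some(1), None], while [None, None, None] leaves
// the vec empty.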
7123 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7124 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7125 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7127 &HTLCUpdateAwaitingACK::AddHTLC {
7128 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7132 amount_msat.write(writer)?;
7133 cltv_expiry.write(writer)?;
7134 payment_hash.write(writer)?;
7135 source.write(writer)?;
7136 onion_routing_packet.write(writer)?;
7138 if let Some(skimmed_fee) = skimmed_fee_msat {
7139 if holding_cell_skimmed_fees.is_empty() {
7140 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7142 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7143 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7145 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7147 payment_preimage.write(writer)?;
7148 htlc_id.write(writer)?;
7150 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7152 htlc_id.write(writer)?;
7153 err_packet.write(writer)?;
7158 match self.context.resend_order {
7159 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7160 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7163 self.context.monitor_pending_channel_ready.write(writer)?;
7164 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7165 self.context.monitor_pending_commitment_signed.write(writer)?;
7167 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7168 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7169 pending_forward.write(writer)?;
7170 htlc_id.write(writer)?;
7173 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7174 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7175 htlc_source.write(writer)?;
7176 payment_hash.write(writer)?;
7177 fail_reason.write(writer)?;
7180 if self.context.is_outbound() {
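// As the funder, we write our pending fee update (if any) directly; the read
// side reconstructs its state as `FeeUpdateState::Outbound` based on
// `is_outbound_from_holder`.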
7181 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7182 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7183 Some(feerate).write(writer)?;
7185 // As for inbound HTLCs, if the update was only announced and never committed in a
7186 // commitment_signed, drop it.
7187 None::<u32>.write(writer)?;
7189 self.context.holding_cell_update_fee.write(writer)?;
7191 self.context.next_holder_htlc_id.write(writer)?;
7192 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7193 self.context.update_time_counter.write(writer)?;
7194 self.context.feerate_per_kw.write(writer)?;
7196 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7197 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7198 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7199 // consider the stale state on reload.
7202 self.context.funding_tx_confirmed_in.write(writer)?;
7203 self.context.funding_tx_confirmation_height.write(writer)?;
7204 self.context.short_channel_id.write(writer)?;
7206 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7207 self.context.holder_dust_limit_satoshis.write(writer)?;
7208 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7210 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7211 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7213 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7214 self.context.holder_htlc_minimum_msat.write(writer)?;
7215 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7217 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7218 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7220 match &self.context.counterparty_forwarding_info {
7223 info.fee_base_msat.write(writer)?;
7224 info.fee_proportional_millionths.write(writer)?;
7225 info.cltv_expiry_delta.write(writer)?;
7227 None => 0u8.write(writer)?
7230 self.context.channel_transaction_parameters.write(writer)?;
7231 self.context.funding_transaction.write(writer)?;
7233 self.context.counterparty_cur_commitment_point.write(writer)?;
7234 self.context.counterparty_prev_commitment_point.write(writer)?;
7235 self.context.counterparty_node_id.write(writer)?;
7237 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7239 self.context.commitment_secrets.write(writer)?;
7241 self.context.channel_update_status.write(writer)?;
7243 #[cfg(any(test, fuzzing))]
7244 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7245 #[cfg(any(test, fuzzing))]
7246 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7247 htlc.write(writer)?;
7250 // If the channel type is something other than only-static-remote-key, then we need to have
7251 // older clients fail to deserialize this channel at all. If the type is
7252 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7253 // at all.
7254 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7255 Some(&self.context.channel_type) } else { None };
7257 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7258 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7259 // a different percentage of the channel value than 10%, which older versions of LDK used
7260 // to set it to before the percentage was made configurable.
7261 let serialized_holder_selected_reserve =
7262 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7263 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7265 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7266 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7267 let serialized_holder_htlc_max_in_flight =
7268 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7269 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7271 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7272 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7274 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7275 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7276 // we write the high bytes as an option here.
7277 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7279 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7281 write_tlv_fields!(writer, {
7282 (0, self.context.announcement_sigs, option),
7283 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7284 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7285 // them twice, once with their original default values above, and once as an option
7286 // here. On the read side, old versions will simply ignore the odd-type entries here,
7287 // and new versions map the default values to None and allow the TLV entries here to
7288 // override that.
7289 (1, self.context.minimum_depth, option),
7290 (2, chan_type, option),
7291 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7292 (4, serialized_holder_selected_reserve, option),
7293 (5, self.context.config, required),
7294 (6, serialized_holder_htlc_max_in_flight, option),
7295 (7, self.context.shutdown_scriptpubkey, option),
7296 (8, self.context.blocked_monitor_updates, optional_vec),
7297 (9, self.context.target_closing_feerate_sats_per_kw, option),
7298 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7299 (13, self.context.channel_creation_height, required),
7300 (15, preimages, required_vec),
7301 (17, self.context.announcement_sigs_state, required),
7302 (19, self.context.latest_inbound_scid_alias, option),
7303 (21, self.context.outbound_scid_alias, required),
7304 (23, channel_ready_event_emitted, option),
7305 (25, user_id_high_opt, option),
7306 (27, self.context.channel_keys_id, required),
7307 (28, holder_max_accepted_htlcs, option),
7308 (29, self.context.temporary_channel_id, option),
7309 (31, channel_pending_event_emitted, option),
7310 (35, pending_outbound_skimmed_fees, optional_vec),
7311 (37, holding_cell_skimmed_fees, optional_vec),
7312 (38, self.context.is_batch_funding, option),
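// A note on the TLV numbering above (following the usual even/odd rule these
// macros implement): readers may skip unknown odd types but must fail on
// unknown even types. Optional niceties (e.g. 35/37, the skimmed fees) are
// therefore odd, while fields old versions must not silently drop (e.g. 38,
// `is_batch_funding`) are even so that older deserializers refuse the channel.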
7319 const MAX_ALLOC_SIZE: usize = 64*1024;
7320 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7322 ES::Target: EntropySource,
7323 SP::Target: SignerProvider
7325 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7326 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7327 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7329 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7330 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7331 // the low bytes now and the high bytes later.
7332 let user_id_low: u64 = Readable::read(reader)?;
7334 let mut config = Some(LegacyChannelConfig::default());
7336 // Read the old serialization of the ChannelConfig from version 0.0.98.
7337 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7338 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7339 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7340 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7342 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7343 let mut _val: u64 = Readable::read(reader)?;
7346 let channel_id = Readable::read(reader)?;
7347 let channel_state = Readable::read(reader)?;
7348 let channel_value_satoshis = Readable::read(reader)?;
7350 let latest_monitor_update_id = Readable::read(reader)?;
7352 let mut keys_data = None;
7354 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7355 // the `channel_keys_id` TLV is present below.
7356 let keys_len: u32 = Readable::read(reader)?;
7357 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
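// Note the clamp: `keys_len` comes from (possibly corrupted) on-disk data, so
// we cap the up-front allocation at MAX_ALLOC_SIZE and instead grow the buffer
// in bounded chunks in the loop below.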
7358 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7359 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7360 let mut data = [0; 1024];
7361 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7362 reader.read_exact(read_slice)?;
7363 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7367 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7368 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7369 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7372 let destination_script = Readable::read(reader)?;
7374 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7375 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7376 let value_to_self_msat = Readable::read(reader)?;
7378 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7380 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7381 for _ in 0..pending_inbound_htlc_count {
7382 pending_inbound_htlcs.push(InboundHTLCOutput {
7383 htlc_id: Readable::read(reader)?,
7384 amount_msat: Readable::read(reader)?,
7385 cltv_expiry: Readable::read(reader)?,
7386 payment_hash: Readable::read(reader)?,
7387 state: match <u8 as Readable>::read(reader)? {
7388 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7389 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7390 3 => InboundHTLCState::Committed,
7391 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7392 _ => return Err(DecodeError::InvalidValue),
7397 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7398 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7399 for _ in 0..pending_outbound_htlc_count {
7400 pending_outbound_htlcs.push(OutboundHTLCOutput {
7401 htlc_id: Readable::read(reader)?,
7402 amount_msat: Readable::read(reader)?,
7403 cltv_expiry: Readable::read(reader)?,
7404 payment_hash: Readable::read(reader)?,
7405 source: Readable::read(reader)?,
7406 state: match <u8 as Readable>::read(reader)? {
7407 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7408 1 => OutboundHTLCState::Committed,
7410 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7411 OutboundHTLCState::RemoteRemoved(option.into())
7414 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7415 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7418 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7419 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7421 _ => return Err(DecodeError::InvalidValue),
7423 skimmed_fee_msat: None,
7427 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7428 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7429 for _ in 0..holding_cell_htlc_update_count {
7430 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7431 0 => HTLCUpdateAwaitingACK::AddHTLC {
7432 amount_msat: Readable::read(reader)?,
7433 cltv_expiry: Readable::read(reader)?,
7434 payment_hash: Readable::read(reader)?,
7435 source: Readable::read(reader)?,
7436 onion_routing_packet: Readable::read(reader)?,
7437 skimmed_fee_msat: None,
7439 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7440 payment_preimage: Readable::read(reader)?,
7441 htlc_id: Readable::read(reader)?,
7443 2 => HTLCUpdateAwaitingACK::FailHTLC {
7444 htlc_id: Readable::read(reader)?,
7445 err_packet: Readable::read(reader)?,
7447 _ => return Err(DecodeError::InvalidValue),
7451 let resend_order = match <u8 as Readable>::read(reader)? {
7452 0 => RAACommitmentOrder::CommitmentFirst,
7453 1 => RAACommitmentOrder::RevokeAndACKFirst,
7454 _ => return Err(DecodeError::InvalidValue),
7457 let monitor_pending_channel_ready = Readable::read(reader)?;
7458 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7459 let monitor_pending_commitment_signed = Readable::read(reader)?;
7461 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7462 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7463 for _ in 0..monitor_pending_forwards_count {
7464 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7467 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7468 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7469 for _ in 0..monitor_pending_failures_count {
7470 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7473 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7475 let holding_cell_update_fee = Readable::read(reader)?;
7477 let next_holder_htlc_id = Readable::read(reader)?;
7478 let next_counterparty_htlc_id = Readable::read(reader)?;
7479 let update_time_counter = Readable::read(reader)?;
7480 let feerate_per_kw = Readable::read(reader)?;
7482 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7483 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7484 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7485 // consider the stale state on reload.
7486 match <u8 as Readable>::read(reader)? {
7489 let _: u32 = Readable::read(reader)?;
7490 let _: u64 = Readable::read(reader)?;
7491 let _: Signature = Readable::read(reader)?;
7493 _ => return Err(DecodeError::InvalidValue),
7496 let funding_tx_confirmed_in = Readable::read(reader)?;
7497 let funding_tx_confirmation_height = Readable::read(reader)?;
7498 let short_channel_id = Readable::read(reader)?;
7500 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7501 let holder_dust_limit_satoshis = Readable::read(reader)?;
7502 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7503 let mut counterparty_selected_channel_reserve_satoshis = None;
7505 // Read the old serialization from version 0.0.98.
7506 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7508 // Read the 8 bytes of backwards-compatibility data.
7509 let _dummy: u64 = Readable::read(reader)?;
7511 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7512 let holder_htlc_minimum_msat = Readable::read(reader)?;
7513 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7515 let mut minimum_depth = None;
7517 // Read the old serialization from version 0.0.98.
7518 minimum_depth = Some(Readable::read(reader)?);
7520 // Read the 4 bytes of backwards-compatibility data.
7521 let _dummy: u32 = Readable::read(reader)?;
7524 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7526 1 => Some(CounterpartyForwardingInfo {
7527 fee_base_msat: Readable::read(reader)?,
7528 fee_proportional_millionths: Readable::read(reader)?,
7529 cltv_expiry_delta: Readable::read(reader)?,
7531 _ => return Err(DecodeError::InvalidValue),
7534 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7535 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7537 let counterparty_cur_commitment_point = Readable::read(reader)?;
7539 let counterparty_prev_commitment_point = Readable::read(reader)?;
7540 let counterparty_node_id = Readable::read(reader)?;
7542 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7543 let commitment_secrets = Readable::read(reader)?;
7545 let channel_update_status = Readable::read(reader)?;
7547 #[cfg(any(test, fuzzing))]
7548 let mut historical_inbound_htlc_fulfills = HashSet::new();
7549 #[cfg(any(test, fuzzing))]
7551 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7552 for _ in 0..htlc_fulfills_len {
7553 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7557 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7558 Some((feerate, if channel_parameters.is_outbound_from_holder {
7559 FeeUpdateState::Outbound
7561 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7567 let mut announcement_sigs = None;
7568 let mut target_closing_feerate_sats_per_kw = None;
7569 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7570 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7571 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7572 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7573 // only, so we default to that if none was written.
7574 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7575 let mut channel_creation_height = Some(serialized_height);
7576 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7578 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7579 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7580 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7581 let mut latest_inbound_scid_alias = None;
7582 let mut outbound_scid_alias = None;
7583 let mut channel_pending_event_emitted = None;
7584 let mut channel_ready_event_emitted = None;
7586 let mut user_id_high_opt: Option<u64> = None;
7587 let mut channel_keys_id: Option<[u8; 32]> = None;
7588 let mut temporary_channel_id: Option<ChannelId> = None;
7589 let mut holder_max_accepted_htlcs: Option<u16> = None;
7591 let mut blocked_monitor_updates = Some(Vec::new());
7593 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7594 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7596 let mut is_batch_funding: Option<()> = None;
7598 read_tlv_fields!(reader, {
7599 (0, announcement_sigs, option),
7600 (1, minimum_depth, option),
7601 (2, channel_type, option),
7602 (3, counterparty_selected_channel_reserve_satoshis, option),
7603 (4, holder_selected_channel_reserve_satoshis, option),
7604 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7605 (6, holder_max_htlc_value_in_flight_msat, option),
7606 (7, shutdown_scriptpubkey, option),
7607 (8, blocked_monitor_updates, optional_vec),
7608 (9, target_closing_feerate_sats_per_kw, option),
7609 (11, monitor_pending_finalized_fulfills, optional_vec),
7610 (13, channel_creation_height, option),
7611 (15, preimages_opt, optional_vec),
7612 (17, announcement_sigs_state, option),
7613 (19, latest_inbound_scid_alias, option),
7614 (21, outbound_scid_alias, option),
7615 (23, channel_ready_event_emitted, option),
7616 (25, user_id_high_opt, option),
7617 (27, channel_keys_id, option),
7618 (28, holder_max_accepted_htlcs, option),
7619 (29, temporary_channel_id, option),
7620 (31, channel_pending_event_emitted, option),
7621 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7622 (37, holding_cell_skimmed_fees_opt, optional_vec),
7623 (38, is_batch_funding, option),
7626 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7627 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7628 // If we've gotten to the funding stage of the channel, populate the signer with its
7629 // required channel parameters.
7630 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
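// `channel_state` packs a base state plus OR'd flag bits; with the flags masked
// off, the base states are monotonically increasing, so the `>= FundingCreated`
// comparison below checks that we have at least reached the funding stage.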
7631 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7632 holder_signer.provide_channel_parameters(&channel_parameters);
7634 (channel_keys_id, holder_signer)
7636 // `keys_data` can be `None` if we had corrupted data.
7637 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7638 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7639 (holder_signer.channel_keys_id(), holder_signer)
7642 if let Some(preimages) = preimages_opt {
7643 let mut iter = preimages.into_iter();
7644 for htlc in pending_outbound_htlcs.iter_mut() {
7646 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7647 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7649 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7650 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7655 // We expect all preimages to be consumed above
7656 if iter.next().is_some() {
7657 return Err(DecodeError::InvalidValue);
7661 let chan_features = channel_type.as_ref().unwrap();
7662 if !chan_features.is_subset(our_supported_features) {
7663 // If the channel was written by a new version and negotiated with features we don't
7664 // understand yet, refuse to read it.
7665 return Err(DecodeError::UnknownRequiredFeature);
7668 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7669 // To account for that, we're proactively setting/overriding the field here.
7670 channel_parameters.channel_type_features = chan_features.clone();
7672 let mut secp_ctx = Secp256k1::new();
7673 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7675 // `user_id` used to be a single u64 value. In order to remain backwards
7676 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7677 // separate u64 values.
7678 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7680 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7682 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7683 let mut iter = skimmed_fees.into_iter();
7684 for htlc in pending_outbound_htlcs.iter_mut() {
7685 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7687 // We expect all skimmed fees to be consumed above
7688 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7690 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7691 let mut iter = skimmed_fees.into_iter();
7692 for htlc in holding_cell_htlc_updates.iter_mut() {
7693 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7694 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7697 // We expect all skimmed fees to be consumed above
7698 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7702 context: ChannelContext {
7705 config: config.unwrap(),
7709 // Note that we don't care about serializing handshake limits as we only ever serialize
7710 // channel data after the handshake has completed.
7711 inbound_handshake_limits_override: None,
7714 temporary_channel_id,
7716 announcement_sigs_state: announcement_sigs_state.unwrap(),
7718 channel_value_satoshis,
7720 latest_monitor_update_id,
7722 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7723 shutdown_scriptpubkey,
7726 cur_holder_commitment_transaction_number,
7727 cur_counterparty_commitment_transaction_number,
7730 holder_max_accepted_htlcs,
7731 pending_inbound_htlcs,
7732 pending_outbound_htlcs,
7733 holding_cell_htlc_updates,
7737 monitor_pending_channel_ready,
7738 monitor_pending_revoke_and_ack,
7739 monitor_pending_commitment_signed,
7740 monitor_pending_forwards,
7741 monitor_pending_failures,
7742 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7744 signer_pending_commitment_update: false,
7745 signer_pending_funding: false,
7748 holding_cell_update_fee,
7749 next_holder_htlc_id,
7750 next_counterparty_htlc_id,
7751 update_time_counter,
7754 #[cfg(debug_assertions)]
7755 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7756 #[cfg(debug_assertions)]
7757 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7759 last_sent_closing_fee: None,
7760 pending_counterparty_closing_signed: None,
7761 expecting_peer_commitment_signed: false,
7762 closing_fee_limits: None,
7763 target_closing_feerate_sats_per_kw,
7765 funding_tx_confirmed_in,
7766 funding_tx_confirmation_height,
7768 channel_creation_height: channel_creation_height.unwrap(),
7770 counterparty_dust_limit_satoshis,
7771 holder_dust_limit_satoshis,
7772 counterparty_max_htlc_value_in_flight_msat,
7773 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7774 counterparty_selected_channel_reserve_satoshis,
7775 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7776 counterparty_htlc_minimum_msat,
7777 holder_htlc_minimum_msat,
7778 counterparty_max_accepted_htlcs,
7781 counterparty_forwarding_info,
7783 channel_transaction_parameters: channel_parameters,
7784 funding_transaction,
7787 counterparty_cur_commitment_point,
7788 counterparty_prev_commitment_point,
7789 counterparty_node_id,
7791 counterparty_shutdown_scriptpubkey,
7795 channel_update_status,
7796 closing_signed_in_flight: false,
7800 #[cfg(any(test, fuzzing))]
7801 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7802 #[cfg(any(test, fuzzing))]
7803 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7805 workaround_lnd_bug_4006: None,
7806 sent_message_awaiting_response: None,
7808 latest_inbound_scid_alias,
7809 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if one is missing
7810 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7812 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7813 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7815 #[cfg(any(test, fuzzing))]
7816 historical_inbound_htlc_fulfills,
7818 channel_type: channel_type.unwrap(),
7821 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7830 use bitcoin::blockdata::constants::ChainHash;
7831 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7832 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7833 use bitcoin::blockdata::opcodes;
7834 use bitcoin::network::constants::Network;
7835 use crate::ln::PaymentHash;
7836 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7837 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7838 use crate::ln::channel::InitFeatures;
7839 use crate::ln::channel::{ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
7840 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7841 use crate::ln::features::ChannelTypeFeatures;
7842 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7843 use crate::ln::script::ShutdownScript;
7844 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7845 use crate::chain::BestBlock;
7846 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7847 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7848 use crate::chain::transaction::OutPoint;
7849 use crate::routing::router::Path;
7850 use crate::util::config::UserConfig;
7851 use crate::util::errors::APIError;
7852 use crate::util::test_utils;
7853 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7854 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7855 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7856 use bitcoin::secp256k1::{SecretKey,PublicKey};
7857 use bitcoin::hashes::sha256::Hash as Sha256;
7858 use bitcoin::hashes::Hash;
7859 use bitcoin::hashes::hex::FromHex;
7860 use bitcoin::hash_types::WPubkeyHash;
7861 use bitcoin::blockdata::locktime::absolute::LockTime;
7862 use bitcoin::address::{WitnessProgram, WitnessVersion};
7863 use crate::prelude::*;
7865 struct TestFeeEstimator {
7868 impl FeeEstimator for TestFeeEstimator {
7869 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7875 fn test_max_funding_satoshis_no_wumbo() {
7876 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7877 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7878 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7882 signer: InMemorySigner,
7885 impl EntropySource for Keys {
7886 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7889 impl SignerProvider for Keys {
7890 type EcdsaSigner = InMemorySigner;
7892 type TaprootSigner = InMemorySigner;
7894 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7895 self.signer.channel_keys_id()
7898 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
7902 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
7904 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
7905 let secp_ctx = Secp256k1::signing_only();
7906 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7907 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7908 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
7911 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
7912 let secp_ctx = Secp256k1::signing_only();
7913 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7914 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
7918 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
7919 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
7920 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
7924 fn upfront_shutdown_script_incompatibility() {
7925 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
7926 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
7927 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
7930 let seed = [42; 32];
7931 let network = Network::Testnet;
7932 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7933 keys_provider.expect(OnGetShutdownScriptpubkey {
7934 returns: non_v0_segwit_shutdown_script.clone(),
7937 let secp_ctx = Secp256k1::new();
7938 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7939 let config = UserConfig::default();
7940 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
7941 Err(APIError::IncompatibleShutdownScript { script }) => {
7942 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
7944 Err(e) => panic!("Unexpected error: {:?}", e),
7945 Ok(_) => panic!("Expected error"),
7949 // Check that, during channel creation, we use the same feerate in the open channel message
7950 // as we do in the Channel object creation itself.
7952 fn test_open_channel_msg_fee() {
7953 let original_fee = 253;
7954 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
7955 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
7956 let secp_ctx = Secp256k1::new();
7957 let seed = [42; 32];
7958 let network = Network::Testnet;
7959 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7961 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7962 let config = UserConfig::default();
7963 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
7965 // Now change the fee so we can check that the fee in the open_channel message is the
7966 // same as the old fee.
7967 fee_est.fee_est = 500;
7968 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7969 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
7973 fn test_holder_vs_counterparty_dust_limit() {
7974 // Test that when calculating the local and remote commitment transaction fees, the correct
7975 // dust limits are used.
7976 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
7977 let secp_ctx = Secp256k1::new();
7978 let seed = [42; 32];
7979 let network = Network::Testnet;
7980 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
7981 let logger = test_utils::TestLogger::new();
7982 let best_block = BestBlock::from_network(network);
7984 // Go through the flow of opening a channel between two nodes, making sure
7985 // they have different dust limits.
7987 // Create Node A's channel pointing to Node B's pubkey
7988 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
7989 let config = UserConfig::default();
7990 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
7992 // Create Node B's channel by receiving Node A's open_channel message
7993 // Make sure A's dust limit is as we expect.
7994 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
7995 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
7996 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
7998 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
7999 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8000 accept_channel_msg.dust_limit_satoshis = 546;
8001 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8002 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8004 // Node A --> Node B: funding created
8005 let output_script = node_a_chan.context.get_funding_redeemscript();
8006 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8007 value: 10000000, script_pubkey: output_script.clone(),
8009 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8010 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8011 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8013 // Node B --> Node A: funding signed
8014 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8016 // Put some inbound and outbound HTLCs in A's channel.
8017 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8018 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8020 amount_msat: htlc_amount_msat,
8021 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8022 cltv_expiry: 300000000,
8023 state: InboundHTLCState::Committed,
8026 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8028 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8029 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8030 cltv_expiry: 200000000,
8031 state: OutboundHTLCState::Committed,
8032 source: HTLCSource::OutboundRoute {
8033 path: Path { hops: Vec::new(), blinded_tail: None },
8034 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8035 first_hop_htlc_msat: 548,
8036 payment_id: PaymentId([42; 32]),
8038 skimmed_fee_msat: None,
8041 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8042 // the dust limit check.
8043 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8044 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8045 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8046 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8048 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8049 // of the HTLCs are seen to be above the dust limit.
8050 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8051 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8052 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8053 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8054 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8058 fn test_timeout_vs_success_htlc_dust_limit() {
8059 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8060 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8061 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8062 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8063 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8064 let secp_ctx = Secp256k1::new();
8065 let seed = [42; 32];
8066 let network = Network::Testnet;
8067 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8069 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8070 let config = UserConfig::default();
8071 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8073 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8074 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8076 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8077 // counted as dust when it shouldn't be.
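// The effective dust threshold for an HTLC we offer on our own commitment is
// our dust limit plus the fee of an HTLC-timeout transaction at the current
// feerate; the amount below is constructed to land 1 sat above that threshold
// (then converted to msat).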
8078 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8079 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8080 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8081 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8083 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8084 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8085 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8086 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8087 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8089 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8091 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8092 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8093 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8094 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8095 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8097 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8098 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8099 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8100 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8101 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8105 fn channel_reestablish_no_updates() {
8106 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8107 let logger = test_utils::TestLogger::new();
8108 let secp_ctx = Secp256k1::new();
8109 let seed = [42; 32];
8110 let network = Network::Testnet;
8111 let best_block = BestBlock::from_network(network);
8112 let chain_hash = ChainHash::using_genesis_block(network);
8113 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8115 // Go through the flow of opening a channel between two nodes.
8117 // Create Node A's channel pointing to Node B's pubkey
8118 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8119 let config = UserConfig::default();
8120 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8122 // Create Node B's channel by receiving Node A's open_channel message
8123 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8124 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8125 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8127 // Node B --> Node A: accept channel
8128 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8129 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8131 // Node A --> Node B: funding created
8132 let output_script = node_a_chan.context.get_funding_redeemscript();
8133 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8134 value: 10000000, script_pubkey: output_script.clone(),
8136 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8137 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8138 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8140 // Node B --> Node A: funding signed
8141 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8143 // Now disconnect the two nodes and check that the commitment point in
8144 // Node B's channel_reestablish message is sane.
8145 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8146 let msg = node_b_chan.get_channel_reestablish(&&logger);
8147 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8148 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8149 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
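// Immediately after funding, each side holds only the initial commitment
// transaction, so the next expected commitment number is 1, and with no
// revocations exchanged yet the next revocation number (and thus the last
// per-commitment secret) is still 0, matching the asserts here and below.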
8151 // Check that the commitment point in Node A's channel_reestablish message
8153 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8154 let msg = node_a_chan.get_channel_reestablish(&&logger);
8155 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8156 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8157 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8161 fn test_configured_holder_max_htlc_value_in_flight() {
8162 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8163 let logger = test_utils::TestLogger::new();
8164 let secp_ctx = Secp256k1::new();
8165 let seed = [42; 32];
8166 let network = Network::Testnet;
8167 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8168 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8169 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8171 let mut config_2_percent = UserConfig::default();
8172 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8173 let mut config_99_percent = UserConfig::default();
8174 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8175 let mut config_0_percent = UserConfig::default();
8176 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8177 let mut config_101_percent = UserConfig::default();
8178 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8180 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8181 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8182 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8183 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8184 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8185 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8187 // Test with the upper bound - 1 of valid values (99%).
8188 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8189 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8190 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8192 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8194 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8195 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8196 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8197 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8198 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8199 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8201 // Test with the upper bound - 1 of valid values (99%).
8202 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8203 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8204 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8206 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8207 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8208 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8209 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8210 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
// than 100.
8215 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8216 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8217 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8219 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8220 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8221 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8222 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8223 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
// than 100.
8228 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8229 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8230 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
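// A minimal sketch (ours, not from the upstream test) of the clamping rule the
// assertions above exercise: the configured percentage is clamped to the
// [1, 100] range before being applied to the channel value.
let clamped_percent = |configured: u64| configured.clamp(1, 100);
assert_eq!(clamped_percent(0), 1); // below the lower bound -> treated as 1%
assert_eq!(clamped_percent(2), 2); // in range -> used as-is
assert_eq!(clamped_percent(101), 100); // above the upper bound -> treated as 100%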
}

#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {
8236 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8237 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8238 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
// Test with valid but unreasonably high channel reserves.
// The opener and acceptor request 49%/49% and 60%/30% channel reserves respectively.
8242 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8243 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
// Test with a calculated channel reserve below the lower bound,
// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8247 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
// Test with invalid channel reserves: the sum of both parties' reserves is
// greater than or equal to the channel value, which must be rejected.
8251 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8252 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
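// Worked example (ours) of the reserve rule checked by the helper below,
// assuming `MIN_THEIR_CHAN_RESERVE_SATOSHIS` is 1_000 sats:
let expected_reserve = |value_sats: u64, perc: f64| cmp::max(1_000, (value_sats as f64 * perc) as u64);
assert_eq!(expected_reserve(10_000_000, 0.02), 200_000); // 2% of 10M sats
assert_eq!(expected_reserve(100_000, 0.00002), 1_000); // the 1_000 sat floor applies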
}

fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8256 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8257 let logger = test_utils::TestLogger::new();
8258 let secp_ctx = Secp256k1::new();
8259 let seed = [42; 32];
8260 let network = Network::Testnet;
8261 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8262 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8263 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8266 let mut outbound_node_config = UserConfig::default();
8267 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8268 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8270 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8271 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8273 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8274 let mut inbound_node_config = UserConfig::default();
8275 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8277 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8278 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8280 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8282 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8283 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
	// Channel negotiations failed
8286 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8287 assert!(result.is_err());
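	// The arithmetic behind this failure (our note): with 50%/50% or 60%/50%, the
	// two requested reserves sum to at least 100% of the channel value, leaving no
	// spendable balance on either side, so the inbound channel rejects the open.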
	}
}

#[test]
fn channel_update() {
8293 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8294 let logger = test_utils::TestLogger::new();
8295 let secp_ctx = Secp256k1::new();
8296 let seed = [42; 32];
8297 let network = Network::Testnet;
8298 let best_block = BestBlock::from_network(network);
8299 let chain_hash = ChainHash::using_genesis_block(network);
8300 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8302 // Create Node A's channel pointing to Node B's pubkey
8303 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8304 let config = UserConfig::default();
8305 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8307 // Create Node B's channel by receiving Node A's open_channel message
8308 // Make sure A's dust limit is as we expect.
8309 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8310 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8311 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8313 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8314 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8315 accept_channel_msg.dust_limit_satoshis = 546;
8316 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8317 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8319 // Node A --> Node B: funding created
8320 let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
	value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8326 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8328 // Node B --> Node A: funding signed
8329 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8331 // Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
	contents: UnsignedChannelUpdate {
		chain_hash,
		short_channel_id: 0,
		timestamp: 0,
		flags: 0,
		cltv_expiry_delta: 100,
		htlc_minimum_msat: 5,
		htlc_maximum_msat: MAX_VALUE_MSAT,
		fee_base_msat: 110,
		fee_proportional_millionths: 11,
		excess_data: Vec::new(),
	},
	signature: Signature::from(unsafe { FFISignature::new() })
};
assert!(node_a_chan.channel_update(&update).unwrap());
8349 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8350 // change our official htlc_minimum_msat.
8351 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
	Some(info) => {
		assert_eq!(info.cltv_expiry_delta, 100);
		assert_eq!(info.fee_base_msat, 110);
		assert_eq!(info.fee_proportional_millionths, 11);
	},
	None => panic!("expected counterparty forwarding info to be Some")
}

// A duplicate update is a no-op; `channel_update` reports that nothing changed.
assert!(!node_a_chan.channel_update(&update).unwrap());
}

#[cfg(feature = "_test_vectors")]
#[test]
fn outbound_commitment_test() {
8367 use bitcoin::sighash;
8368 use bitcoin::consensus::encode::serialize;
8369 use bitcoin::sighash::EcdsaSighashType;
8370 use bitcoin::hashes::hex::FromHex;
8371 use bitcoin::hash_types::Txid;
8372 use bitcoin::secp256k1::Message;
8373 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8374 use crate::ln::PaymentPreimage;
use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
8376 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8377 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8378 use crate::util::logger::Logger;
8379 use crate::sync::Arc;
8380 use core::str::FromStr;
8381 use hex::DisplayHex;
8383 // Test vectors from BOLT 3 Appendices C and F (anchors):
8384 let feeest = TestFeeEstimator{fee_est: 15000};
let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
8386 let secp_ctx = Secp256k1::new();
let mut signer = InMemorySigner::new(
	&secp_ctx,
	SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
	SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

	// These aren't set in the test vectors:
	[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
	10_000_000,
	[0; 32],
	[0; 32],
);
8403 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8404 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8405 let keys_provider = Keys { signer: signer.clone() };
8407 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8408 let mut config = UserConfig::default();
8409 config.channel_handshake_config.announced_channel = false;
8410 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8411 chan.context.holder_dust_limit_satoshis = 546;
8412 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8414 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
let counterparty_pubkeys = ChannelPublicKeys {
	funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
	revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
	payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
	delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
	htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
chan.context.channel_transaction_parameters.counterparty_parameters = Some(
	CounterpartyChannelTransactionParameters {
		pubkeys: counterparty_pubkeys.clone(),
		selected_contest_delay: 144
	});
chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
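// Sanity sketch (ours, not part of the vectors): the funding output is a 2-of-2
// multisig over both funding pubkeys. `make_funding_redeemscript` orders the keys
// per BOLT 3, so the script is independent of which side builds it.
let _funding_script_preview = chan_utils::make_funding_redeemscript(
	&signer.pubkeys().funding_pubkey, &counterparty_pubkeys.funding_pubkey);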
8431 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8432 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8434 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8435 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8437 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8438 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8440 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8441 // derived from a commitment_seed, so instead we copy it here and call
8442 // build_commitment_transaction.
8443 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8444 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8445 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8446 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8447 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
macro_rules! test_commitment {
	( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
		chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
	};
}

macro_rules! test_commitment_with_anchors {
	( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
		chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
	};
}
macro_rules! test_commitment_common {
	( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
		$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
	} ) => { {
		// Build the commitment transaction, keeping only the HTLCs that made it
		// into an output (i.e. were not trimmed as dust).
		let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
			let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

			let htlcs = commitment_stats.htlcs_included.drain(..)
				.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
				.collect();
			(commitment_stats.tx, htlcs)
		};
		let trusted_tx = commitment_tx.trust();
		let unsigned_tx = trusted_tx.built_transaction();
		let redeemscript = chan.context.get_funding_redeemscript();
		let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
		let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
		log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
		assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

		let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
		per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
		let mut counterparty_htlc_sigs = Vec::new();
		counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
		$({
			let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
			per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
			counterparty_htlc_sigs.push(remote_signature);
		})*
		assert_eq!(htlcs.len(), per_htlc.len());

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_tx.clone(),
			counterparty_signature,
			counterparty_htlc_sigs,
			&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
			chan.context.counterparty_funding_pubkey()
		);
		let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
		assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

		let funding_redeemscript = chan.context.get_funding_redeemscript();
		let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
		assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

		// ((htlc, counterparty_sig), (index, holder_sig))
		let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

		$({
			log_trace!(logger, "verifying htlc {}", $htlc_idx);
			let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

			let ref htlc = htlcs[$htlc_idx];
			let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
				chan.context.get_counterparty_selected_contest_delay().unwrap(),
				&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
			let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
			let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
			let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
			assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

			// Received (non-offered) HTLCs need the preimage to claim; recover it
			// from the test-vector set of hashes of [i; 32].
			let mut preimage: Option<PaymentPreimage> = None;
			if !htlc.offered {
				for i in 0..5 {
					let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
					if out == htlc.payment_hash {
						preimage = Some(PaymentPreimage([i; 32]));
					}
				}

				assert!(preimage.is_some());
			}

			let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
			let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
				channel_derivation_parameters: ChannelDerivationParameters {
					value_satoshis: chan.context.channel_value_satoshis,
					keys_id: chan.context.channel_keys_id,
					transaction_parameters: chan.context.channel_transaction_parameters.clone(),
				},
				commitment_txid: trusted_tx.txid(),
				per_commitment_number: trusted_tx.commitment_number(),
				per_commitment_point: trusted_tx.per_commitment_point(),
				feerate_per_kw: trusted_tx.feerate_per_kw(),
				htlc: htlc.clone(),
				preimage: preimage.clone(),
				counterparty_sig: *htlc_counterparty_sig,
			}, &secp_ctx).unwrap();
			let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
			assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

			let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
			assert_eq!(signature, htlc_holder_sig, "htlc sig");
			let trusted_tx = holder_commitment_tx.trust();
			htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
			log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
			assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
		})*
		assert!(htlc_counterparty_sig_iter.next().is_none());
	} }
}
8565 // anchors: simple commitment tx with no HTLCs and single anchor
8566 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8567 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8568 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8570 // simple commitment tx with no HTLCs
8571 chan.context.value_to_self_msat = 7000000000;
8573 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8574 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8575 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8577 // anchors: simple commitment tx with no HTLCs
8578 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8579 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8580 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 0,
		amount_msat: 1000000,
		cltv_expiry: 500,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
	out
});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 2,
		amount_msat: 2000000,
		cltv_expiry: 502,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 3,
		amount_msat: 3000000,
		cltv_expiry: 503,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
	out
});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 4,
		amount_msat: 4000000,
		cltv_expiry: 504,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
	out
});
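// The five HTLCs above match the BOLT 3 test-vector set: inbound HTLCs of
// 1_000/2_000/4_000 sats and outbound HTLCs of 2_000/3_000 sats, with payment
// preimages [0x00; 32] through [0x04; 32] and CLTV expiries 500 through 504.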
8642 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8643 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8644 chan.context.feerate_per_kw = 0;
8646 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8647 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8648 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8651 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8652 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8653 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8656 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8657 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8658 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8661 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8662 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8663 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8666 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8667 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8668 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8671 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8672 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8673 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8676 // commitment tx with seven outputs untrimmed (maximum feerate)
8677 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8678 chan.context.feerate_per_kw = 647;
8680 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8681 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8682 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8685 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8686 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8687 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8690 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8691 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8692 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8695 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8696 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8697 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8700 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8701 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8702 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8705 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8706 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8707 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8710 // commitment tx with six outputs untrimmed (minimum feerate)
8711 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8712 chan.context.feerate_per_kw = 648;
8714 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8715 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8716 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8719 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8720 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8721 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8724 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8725 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8726 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8729 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8730 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8731 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8734 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8735 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8736 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8739 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8740 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8741 chan.context.feerate_per_kw = 645;
8742 chan.context.holder_dust_limit_satoshis = 1001;
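// Our note on the 1_001 sat dust limit: anchor channels use zero-fee HTLC
// transactions, so the trim threshold for an HTLC is the dust limit itself and
// the 1_000 sat HTLC from the set above is trimmed out of this commitment.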
8744 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8745 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8746 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8749 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8750 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8751 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8754 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8755 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8756 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8759 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8760 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8761 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8764 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8765 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8766 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8769 // commitment tx with six outputs untrimmed (maximum feerate)
8770 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8771 chan.context.feerate_per_kw = 2069;
8772 chan.context.holder_dust_limit_satoshis = 546;
8774 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8775 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8776 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8779 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8780 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8781 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8784 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8785 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8786 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8789 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8790 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8791 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8794 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8795 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8796 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8799 // commitment tx with five outputs untrimmed (minimum feerate)
8800 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8801 chan.context.feerate_per_kw = 2070;
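// Why the sixth output disappears exactly here: the 2,000 sat received HTLC must fund a
// 703-weight HTLC-success transaction. At 2069 sat/kw that fee is 703 * 2069 / 1000 =
// 1454 sat, leaving 2000 - 1454 = 546 sat, exactly the dust limit, so the output
// survives; at 2070 sat/kw the fee becomes 1455 sat, leaving 545 sat, and it is trimmed.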
8803 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8804 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8805 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8808 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8809 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8810 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8813 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8814 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8815 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8818 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
8819 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
8820 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8823 // commitment tx with five outputs untrimmed (maximum feerate)
8824 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8825 chan.context.feerate_per_kw = 2194;
8827 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
8828 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
8829 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8832 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
8833 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
8834 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8837 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
8838 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
8839 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8842 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
8843 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
8844 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8847 // commitment tx with four outputs untrimmed (minimum feerate)
8848 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8849 chan.context.feerate_per_kw = 2195;
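// Same arithmetic for the 2,000 sat offered HTLC against its 663-weight HTLC-timeout
// transaction: 663 * 2194 / 1000 = 1454 sat leaves exactly the 546 sat dust limit, while
// 663 * 2195 / 1000 = 1455 sat leaves 545 sat, so the commitment drops to four outputs.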
8851 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
8852 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
8853 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8856 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
8857 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
8858 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8861 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
8862 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
8863 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8866 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
8867 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8868 chan.context.feerate_per_kw = 2185;
8869 chan.context.holder_dust_limit_satoshis = 2001;
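// With `anchors_zero_fee_htlc_tx` the second-stage transactions pay no fee, so trimming
// reduces to a plain dust comparison: a 2,001 sat dust limit trims both 2,000 sat HTLCs
// while the 3,000 and 4,000 sat ones survive.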
8870 let cached_channel_type = chan.context.channel_type.clone();
8871 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8873 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
8874 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
8875 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8878 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
8879 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
8880 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8883 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
8884 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
8885 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8888 // commitment tx with four outputs untrimmed (maximum feerate)
8889 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8890 chan.context.feerate_per_kw = 3702;
8891 chan.context.holder_dust_limit_satoshis = 546;
8892 chan.context.channel_type = cached_channel_type.clone();
8894 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
8895 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
8896 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8899 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
8900 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
8901 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8904 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
8905 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
8906 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8909 // commitment tx with three outputs untrimmed (minimum feerate)
8910 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8911 chan.context.feerate_per_kw = 3703;
8913 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
8914 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
8915 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8918 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
8919 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
8920 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8923 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
8924 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8925 chan.context.feerate_per_kw = 3687;
8926 chan.context.holder_dust_limit_satoshis = 3001;
8927 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8929 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
8930 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
8931 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8934 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
8935 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
8936 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8939 // commitment tx with three outputs untrimmed (maximum feerate)
8940 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8941 chan.context.feerate_per_kw = 4914;
8942 chan.context.holder_dust_limit_satoshis = 546;
8943 chan.context.channel_type = cached_channel_type.clone();
8945 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
8946 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
8947 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8950 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
8951 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
8952 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8955 // commitment tx with two outputs untrimmed (minimum feerate)
8956 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8957 chan.context.feerate_per_kw = 4915;
8958 chan.context.holder_dust_limit_satoshis = 546;
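// Boundary for the last remaining HTLC: the 4,000 sat received HTLC pays 703 * 4914 /
// 1000 = 3454 sat for its HTLC-success transaction, keeping exactly 546 sat; at 4915
// sat/kw the fee is 3455 sat and the HTLC is trimmed, leaving only to_local and to_remote.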
8960 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
8961 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
8962 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8964 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
8965 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8966 chan.context.feerate_per_kw = 4894;
8967 chan.context.holder_dust_limit_satoshis = 4001;
8968 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8970 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
8971 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
8972 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8974 // commitment tx with two outputs untrimmed (maximum feerate)
8975 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8976 chan.context.feerate_per_kw = 9651180;
8977 chan.context.holder_dust_limit_satoshis = 546;
8978 chan.context.channel_type = cached_channel_type.clone();
8980 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
8981 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
8982 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8984 // commitment tx with one output untrimmed (minimum feerate)
8985 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8986 chan.context.feerate_per_kw = 9651181;
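// At this feerate the funder can no longer pay the commitment fee and still keep an
// above-dust to_local output, so to_local is trimmed as well and only to_remote remains.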
8988 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
8989 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
8990 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8992 // anchors: commitment tx with one output untrimmed (minimum dust limit)
8993 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8994 chan.context.feerate_per_kw = 6216010;
8995 chan.context.holder_dust_limit_satoshis = 4001;
8996 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8998 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
8999 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9000 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9002 // commitment tx with fee greater than funder amount
9003 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9004 chan.context.feerate_per_kw = 9651936;
9005 chan.context.holder_dust_limit_satoshis = 546;
9006 chan.context.channel_type = cached_channel_type;
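// The fee is clamped to the funder's available balance rather than underflowing, so this
// yields a transaction (and signatures) identical to the one-output vector above.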
9008 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9009 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9010 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9012 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9013 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9014 chan.context.feerate_per_kw = 253;
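// The two offered HTLCs round to identical 5,000 sat outputs with byte-identical scripts
// (same payment hash, and non-anchor offered-HTLC scripts do not embed the expiry), so
// BOLT 3 output ordering falls back to sorting by `cltv_expiry`: the 505-expiry HTLC
// sorts before the 506-expiry one, fixing which HTLC-timeout locktime pairs with which
// output index.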
9015 chan.context.pending_inbound_htlcs.clear();
9016 chan.context.pending_inbound_htlcs.push({
9017 let mut out = InboundHTLCOutput{
9018 htlc_id: 1,
9019 amount_msat: 2000000,
9020 cltv_expiry: 501,
9021 payment_hash: PaymentHash([0; 32]),
9022 state: InboundHTLCState::Committed,
9023 };
9024 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9025 out
9026 });
9027 chan.context.pending_outbound_htlcs.clear();
9028 chan.context.pending_outbound_htlcs.push({
9029 let mut out = OutboundHTLCOutput{
9030 htlc_id: 6,
9031 amount_msat: 5000001,
9032 cltv_expiry: 506,
9033 payment_hash: PaymentHash([0; 32]),
9034 state: OutboundHTLCState::Committed,
9035 source: HTLCSource::dummy(),
9036 skimmed_fee_msat: None,
9037 };
9038 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9039 out
9040 });
9041 chan.context.pending_outbound_htlcs.push({
9042 let mut out = OutboundHTLCOutput{
9043 htlc_id: 5,
9044 amount_msat: 5000000,
9045 cltv_expiry: 505,
9046 payment_hash: PaymentHash([0; 32]),
9047 state: OutboundHTLCState::Committed,
9048 source: HTLCSource::dummy(),
9049 skimmed_fee_msat: None,
9050 };
9051 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9052 out
9053 });
9055 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9056 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9057 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9060 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9061 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9062 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9064 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9065 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9066 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9068 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9069 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9070 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9073 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9074 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9075 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9076 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9079 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9080 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9081 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9083 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9084 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9085 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9087 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9088 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9089 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9094 fn test_per_commitment_secret_gen() {
9095 // Test vectors from BOLT 3 Appendix D:
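// `build_commitment_secret` implements BOLT 3's `generate_from_seed`: walk the 48 index
// bits from high to low and, for each set bit, flip that bit in the running value and
// SHA256 the result. A minimal sketch of the loop, matching the helper's signature in
// `chan_utils`:
//
//     fn build_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
//         let mut res = *commitment_seed;
//         for bitpos in (0..48).rev() {
//             if idx & (1 << bitpos) != 0 {
//                 res[bitpos / 8] ^= 1 << (bitpos & 7);
//                 res = Sha256::hash(&res).to_byte_array();
//             }
//         }
//         res
//     }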
9097 let mut seed = [0; 32];
9098 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9099 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9100 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9102 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9103 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9104 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9106 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9107 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9109 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9110 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9112 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9113 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9114 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9118 fn test_key_derivation() {
9119 // Test vectors from BOLT 3 Appendix E:
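// Every derived key below follows pubkey = basepoint + SHA256(per_commitment_point ||
// basepoint) * G (equivalently privkey = basepoint_secret + SHA256(per_commitment_point
// || basepoint)), except the revocation key, which commits to both parties' inputs:
//   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                    + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)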
9120 let secp_ctx = Secp256k1::new();
9122 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9123 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9125 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9126 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9128 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9129 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
9131 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9132 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
9134 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9135 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9137 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9138 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9142 fn test_zero_conf_channel_type_support() {
9143 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9144 let secp_ctx = Secp256k1::new();
9145 let seed = [42; 32];
9146 let network = Network::Testnet;
9147 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9148 let logger = test_utils::TestLogger::new();
9150 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9151 let config = UserConfig::default();
9152 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9153 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9155 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9156 channel_type_features.set_zero_conf_required();
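// The inbound handshake should accept an explicit `zero_conf` channel type; whether the
// funder is actually trusted enough to use the channel before confirmation is a separate
// acceptance decision, not part of this type check.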
9158 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9159 open_channel_msg.channel_type = Some(channel_type_features);
9160 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9161 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9162 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9163 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9164 assert!(res.is_ok());
9168 fn test_supports_anchors_zero_htlc_tx_fee() {
9169 // Tests that when both sides support and negotiate `anchors_zero_fee_htlc_tx`, it becomes
9170 // the resulting `channel_type`.
9171 let secp_ctx = Secp256k1::new();
9172 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9173 let network = Network::Testnet;
9174 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9175 let logger = test_utils::TestLogger::new();
9177 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9178 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9180 let mut config = UserConfig::default();
9181 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
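		// With this flag set, an outbound channel proposes `anchors_zero_fee_htlc_tx` in its
		// channel_type, but only when the counterparty's InitFeatures advertise support; otherwise
		// it falls back to a non-anchor channel type, as the first case below demonstrates.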

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`; both
		// sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
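		// Per BOLT 9, even feature bits mean "required" (odd bits mean "optional"): bit 12 is
		// `option_static_remotekey` and bit 20 is the legacy `option_anchor_outputs` ("simple"
		// anchors), so these InitFeatures model a peer that requires both.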

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());
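		// Sanity check: these feature bits are known to our feature machinery (not "unknown"); we
		// simply refuse to negotiate the legacy `option_anchors` channel type, which is what the
		// two scenarios below exercise.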

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
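		// A must notice that the `channel_type` echoed back in accept_channel differs from the one
		// it proposed in open_channel and refuse to continue the handshake.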

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;
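		// trust_own_funding_0conf normally lets us treat funding transactions we created ourselves
		// as trusted and send channel_ready without waiting for confirmations; the point of this
		// test is that batch funding must still hold channel_ready back until the whole batch is
		// ready.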

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true,  // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
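		// The second output stands in for another channel's funding output in the same batch; only
		// output index 0 belongs to the channel under test.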
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(),
			funding_outpoint,
			true, // is_batch_funding
			&&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
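		// monitor_updating_restored plays the role of the ChannelManager completing the initial
		// ChannelMonitor persistence, which is what releases node b's pending channel_ready.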

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);
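		// channel_state is a bitmask here: FundingSent is combined with the WaitingForBatch flag,
		// and it is WaitingForBatch that suppresses our channel_ready until set_batch_ready() below.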

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// ChannelState::WaitingForBatch is only cleared when the ChannelManager calls set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}