// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script,Builder};
use bitcoin::blockdata::transaction::{Transaction, EcdsaSighashType};
use bitcoin::util::sighash;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::{EcdsaChannelSigner, WriteableEcdsaChannelSigner, EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use crate::util::logger::Logger;
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::ops::Deref;

#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use bitcoin::hashes::hex::ToHex;
use crate::sign::type_resolver::ChannelSignerType;
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.
	Committed,
	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 4,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 8,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	ChannelReady = 64,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 4096,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
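
// Illustrative, test-only sketch (not part of the upstream file): how the masks above are
// typically combined. `channel_state` is a hypothetical raw state word; the real accessors on
// `ChannelContext` below perform equivalent checks.
#[cfg(test)]
#[allow(dead_code)]
fn example_state_flag_checks(channel_state: u32) -> (u32, bool) {
	// Mask out every flag bit to recover the base state (e.g. `ChannelReady`).
	let base_state = channel_state & !STATE_FLAGS;
	// A shutdown is in progress if either side has sent a `shutdown` message.
	let shutdown_initiated = channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0;
	(base_state, shutdown_initiated)
}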
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
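
// Illustrative, test-only sketch (not part of the upstream file): commitment numbers count
// *down* from `INITIAL_COMMITMENT_NUMBER`, so the zero-based index of a commitment can be
// recovered by subtraction. The helper name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
fn example_commitment_tx_index(holder_commitment_number: u64) -> u64 {
	// The first commitment uses INITIAL_COMMITMENT_NUMBER; each new state decrements the number.
	INITIAL_COMMITMENT_NUMBER - holder_commitment_number
}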
pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
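
// Illustrative, test-only sketch (not part of the upstream file): how the weight constants above
// combine into a commitment transaction fee. Assumes a non-anchor channel type; the helper name
// and inputs are hypothetical, and the real fee helpers live on `ChannelContext`.
#[cfg(test)]
#[allow(dead_code)]
fn example_commit_tx_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: usize) -> u64 {
	let channel_type = ChannelTypeFeatures::only_static_remote_key();
	let weight = commitment_tx_base_weight(&channel_type)
		+ num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	// Fee is the transaction weight times the feerate, where the feerate is expressed in
	// satoshis per 1000 weight units.
	feerate_per_kw as u64 * weight / 1000
}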
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
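
// Illustrative, test-only sketch (not part of the upstream file): an inbound HTLC is treated as
// "dust" on the holder's commitment transaction if, after paying the second-stage HTLC-success
// transaction fee, its value would not clear the dust limit. The helper name and parameters are
// hypothetical; the real checks live in the HTLC stats and commitment-building helpers below.
#[cfg(test)]
#[allow(dead_code)]
fn example_inbound_htlc_is_dust_on_holder_tx(
	channel_type: &ChannelTypeFeatures, feerate_per_kw: u32, dust_limit_satoshis: u64,
	htlc_amount_msat: u64,
) -> bool {
	// Anchor channels use zero-fee HTLC transactions, so no second-stage fee applies.
	let htlc_tx_fee_sat = if channel_type.supports_anchors_zero_fee_htlc_tx() {
		0
	} else {
		feerate_per_kw as u64 * htlc_success_tx_weight(channel_type) / 1000
	};
	htlc_amount_msat / 1000 < dust_limit_satoshis + htlc_tx_fee_sat
}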
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}
impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}
impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, either inbound or outbound.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self { amount_msat, origin }
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`.
///
/// When the signer becomes unblocked, any non-`None` event accumulated here should be sent to the
/// peer by the caller.
pub(super) struct SignerResumeUpdates {
	/// A `commitment_signed` message, possibly with additional HTLC-related messages (e.g.,
	/// `update_add_htlc`) that should be placed in the commitment.
	///
	/// When both this and `raa` contain values, they should be sent to the peer using an ordering
	/// consistent with `order`.
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	/// A `revoke_and_ack` message that should be sent to the peer.
	///
	/// When both this and `commitment_update` contain values, they should be sent to the peer
	/// using an ordering consistent with `order`.
	pub raa: Option<msgs::RevokeAndACK>,
	/// The order in which the `commitment_signed` and `revoke_and_ack` messages should be provided to
	/// the peer. Only meaningful if both of these messages are present.
	pub order: RAACommitmentOrder,
	/// A `funding_signed` message that should be sent to the peer.
	pub funding_signed: Option<msgs::FundingSigned>,
	/// A `funding_created` message that should be sent to the peer.
	pub funding_created: Option<msgs::FundingCreated>,
	/// A `channel_ready` message that should be sent to the peer. If present, it should be sent last.
	pub channel_ready: Option<msgs::ChannelReady>,
}
pub(super) struct UnfundedInboundV1SignerResumeUpdates {
	pub accept_channel: Option<msgs::AcceptChannel>,
}
pub(super) struct UnfundedOutboundV1SignerResumeUpdates {
	pub open_channel: Option<msgs::OpenChannel>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The return type of `force_shutdown`
///
/// Contains a tuple with the following:
/// - An optional (counterparty_node_id, funding_txo, [`ChannelMonitorUpdate`]) tuple
/// - A list of HTLCs to fail back in the form of the (source, payment hash, and this channel's
///   counterparty_node_id and channel_id).
/// - An optional transaction id identifying a corresponding batch funding transaction.
pub(crate) type ShutdownResult = (
	Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	Option<Txid>,
);
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
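
// Illustrative, test-only sketch (not part of the upstream file): how the fee spike buffer and
// the concurrent inbound HTLC buffer above are applied when checking whether the funder can
// afford its commitment fee. The helper name and inputs are hypothetical; assumes a non-anchor
// channel type.
#[cfg(test)]
#[allow(dead_code)]
fn example_buffered_commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize) -> u64 {
	let channel_type = ChannelTypeFeatures::only_static_remote_key();
	// Budget for a couple of extra concurrent inbound HTLCs on top of the known ones...
	let buffered_htlcs = num_htlcs as u64 + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64;
	let weight = commitment_tx_base_weight(&channel_type) + buffered_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	// ...and compute the fee against a feerate inflated by the fee spike buffer multiple.
	feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * weight / 1000
}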
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
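
// Worked arithmetic for the constant above, as a test-only sketch (numbers taken from the doc
// comment; the helper name is hypothetical): ~300 seconds of propagation delay divided by a
// 60 second tick interval yields 5 ticks.
#[cfg(test)]
#[allow(dead_code)]
fn example_expire_prev_config_ticks_derivation() -> usize {
	let convergence_delay_secs = 300; // average network-wide update propagation delay
	let tick_interval_secs = 60; // expected interval between timer ticks
	convergence_delay_secs / tick_interval_secs // == EXPIRE_PREV_CONFIG_TICKS == 5
}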
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::Signer: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to fund the channel after this reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: u32,
	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<<SP::Target as SignerProvider>::Signer>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: Script,
	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,

	// The commitment point corresponding to `cur_holder_commitment_transaction_number`, which is the
	// *next* state. On initial channel construction, this value may be None, in which case that means
	// that the first commitment point wasn't ready at the time that the channel needed to be created.
	cur_holder_commitment_point: Option<PublicKey>,
	// The commitment secret corresponding to `cur_holder_commitment_transaction_number + 2`, which is
	// the *previous* state.
	prev_holder_commitment_secret: Option<[u8; 32]>,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`]: indicates that we've deferred sending a
	/// `revoke_and_ack`, and should do so once the signer has become unblocked.
	signer_pending_revoke_and_ack: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send a
	/// [`msgs::ChannelReady`].
	signer_pending_channel_ready: bool,
	/// If we attempted to retrieve the per-commitment point for the next transaction but the signer
	/// wasn't ready, then this will be set to `true`.
	signer_pending_commitment_point: bool,
	/// If we attempted to release the per-commitment secret for the previous transaction but the
	/// signer wasn't ready, then this will be set to `true`.
	signer_pending_released_secret: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,
	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<Script>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to delay the closing negotiation indefinitely.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,
	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}
	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
			return ChannelShutdownState::ShutdownComplete;
		}
		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
			return ChannelShutdownState::ShutdownInitiated;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
			return ChannelShutdownState::ResolvingHTLCs;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
			return ChannelShutdownState::NegotiatingClosingFee;
		}
		return ChannelShutdownState::NotShuttingDown;
	}
	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() &&
		self.pending_outbound_htlcs.is_empty() &&
		self.pending_update_fee.is_none() &&
		self.channel_state &
			(BOTH_SIDES_SHUTDOWN_MASK |
			 ChannelState::AwaitingRemoteRevoke as u32 |
			 ChannelState::PeerDisconnected as u32 |
			 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
	}
	/// Returns true if this channel is currently available for use. This is a superset of
	/// is_usable() and considers things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}
	// Public utilities:

	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}

	// Return the `temporary_channel_id` used during channel establishment.
	//
	// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}

	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}

	/// Gets the "user_id" value passed into the construction of this channel. It has no special
	/// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}

	/// Gets the channel's type
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}
	/// Gets the channel's `short_channel_id`.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}

	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}
	/// Returns the holder signer for this channel.
	pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
		return &self.holder_signer
	}

	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}

	/// Returns the block hash in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}

	/// Returns the current number of confirmations on the funding transaction.
	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
		if self.funding_tx_confirmation_height == 0 {
			// We either haven't seen any confirmation yet, or observed a reorg.
			return 0;
		}

		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	}
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}
	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_announced_htlc_max_msat(&self) -> u64 {
		return cmp::min(
			// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
			// to use full capacity. This is an effort to reduce routing failures, because in many cases
			// channel might have been used to route very small values (either by honest users or as DoS).
			self.channel_value_satoshis * 1000 * 9 / 10,

			self.counterparty_max_htlc_value_in_flight_msat
		);
	}
	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}

	fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
		self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
			let holder_reserve = self.holder_selected_channel_reserve_satoshis;
			cmp::min(
				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
				party_max_htlc_value_in_flight_msat
			)
		})
	}
	pub fn get_value_satoshis(&self) -> u64 {
		self.channel_value_satoshis
	}

	pub fn get_fee_proportional_millionths(&self) -> u32 {
		self.config.options.forwarding_fee_proportional_millionths
	}

	pub fn get_cltv_expiry_delta(&self) -> u16 {
		cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
	}
1242 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1243 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1244 where F::Target: FeeEstimator
1246 match self.config.options.max_dust_htlc_exposure {
1247 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1248 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1249 ConfirmationTarget::OnChainSweep);
1250 feerate_per_kw as u64 * multiplier
1252 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
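// Editor's worked example (illustrative, not part of the original source): with
// MaxDustHTLCExposure::FeeRateMultiplier(5000) and a 253 sat/kW estimate for
// ConfirmationTarget::OnChainSweep, the allowed dust exposure is 253 * 5000 =
// 1_265_000 msat; a FixedLimitMsat value is returned unchanged.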
1256 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1257 pub fn prev_config(&self) -> Option<ChannelConfig> {
1258 self.prev_config.map(|prev_config| prev_config.0)
1261 // Checks whether we should emit a `ChannelPending` event.
1262 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1263 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1266 // Returns whether we already emitted a `ChannelPending` event.
1267 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1268 self.channel_pending_event_emitted
1271 // Remembers that we already emitted a `ChannelPending` event.
1272 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1273 self.channel_pending_event_emitted = true;
1276 // Checks whether we should emit a `ChannelReady` event.
1277 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1278 self.is_usable() && !self.channel_ready_event_emitted
1281 // Remembers that we already emitted a `ChannelReady` event.
1282 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1283 self.channel_ready_event_emitted = true;
1286 /// Retrieves the next commitment point and previous commitment secret from the signer.
1287 pub fn update_holder_per_commitment<L: Deref>(&mut self, logger: &L) where L::Target: Logger
1289 let transaction_number = self.cur_holder_commitment_transaction_number;
1290 let signer = self.holder_signer.as_ref();
1292 log_trace!(logger, "Retrieving commitment point for {} transaction number {}", self.channel_id(), transaction_number);
1293 self.cur_holder_commitment_point = match signer.get_per_commitment_point(transaction_number, &self.secp_ctx) {
1295 if self.signer_pending_commitment_point {
1296 log_trace!(logger, "Commitment point for {} transaction number {} retrieved; clearing signer_pending_commitment_point",
1297 self.channel_id(), transaction_number);
1298 self.signer_pending_commitment_point = false;
1304 if !self.signer_pending_commitment_point {
1305 log_trace!(logger, "Commitment point for {} transaction number {} is not available; setting signer_pending_commitment_point",
1306 self.channel_id(), transaction_number);
1307 self.signer_pending_commitment_point = true;
1313 let releasing_transaction_number = transaction_number + 2;
1314 if releasing_transaction_number <= INITIAL_COMMITMENT_NUMBER {
1315 log_trace!(logger, "Retrieving commitment secret for {} transaction number {}", self.channel_id(), releasing_transaction_number);
1316 self.prev_holder_commitment_secret = match signer.release_commitment_secret(releasing_transaction_number) {
1318 if self.signer_pending_released_secret {
1319 log_trace!(logger, "Commitment secret for {} transaction number {} retrieved; clearing signer_pending_released_secret",
1320 self.channel_id(), releasing_transaction_number);
1321 self.signer_pending_released_secret = false;
1327 if !self.signer_pending_released_secret {
1328 log_trace!(logger, "Commitment secret for {} transaction number {} is not available; setting signer_pending_released_secret",
1329 self.channel_id(), releasing_transaction_number);
1330 self.signer_pending_released_secret = true;
1338 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1339 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1340 /// no longer be considered when forwarding HTLCs.
1341 pub fn maybe_expire_prev_config(&mut self) {
1342 if self.prev_config.is_none() {
1345 let prev_config = self.prev_config.as_mut().unwrap();
1347 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1348 self.prev_config = None;
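// Editor's note (illustrative, not from the original comments): the tick counter stored
// alongside `prev_config` is bumped on each call here, typically driven by the periodic
// timer; once it reaches `EXPIRE_PREV_CONFIG_TICKS` the previous config is dropped and is
// no longer honoured when forwarding HTLCs.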
1352 /// Returns the current [`ChannelConfig`] applied to the channel.
1353 pub fn config(&self) -> ChannelConfig {
1357 /// Updates the channel's config. A bool is returned indicating whether applying the config
1358 /// update resulted in a new ChannelUpdate message.
1359 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1360 let did_channel_update =
1361 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1362 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1363 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1364 if did_channel_update {
1365 self.prev_config = Some((self.config.options, 0));
1366 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1367 // policy change to propagate throughout the network.
1368 self.update_time_counter += 1;
1370 self.config.options = *config;
1374 /// Returns true if funding_signed was sent/received and the
1375 /// funding transaction has been broadcast if necessary.
1376 pub fn is_funding_broadcast(&self) -> bool {
1377 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1378 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1381 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1382 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1383 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1384 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1385 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding an HTLC to it).
1387 /// @local is used only to convert relevant internal structures which refer to remote vs local
1388 /// to decide value of outputs and direction of HTLCs.
1389 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1390 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1391 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1392 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1393 /// which peer generated this transaction and "to whom" this transaction flows.
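/// (Editor's illustrative reading of the flags, not from the original documentation: with
/// `local == true` and `generated_by_local == false` we build our own, i.e. holder,
/// commitment transaction as it would look after applying updates proposed by the
/// counterparty, which is the transaction we expect them to sign in their next
/// `commitment_signed`.)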
1395 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1396 where L::Target: Logger
1398 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1399 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1400 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1402 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1403 let mut remote_htlc_total_msat = 0;
1404 let mut local_htlc_total_msat = 0;
1405 let mut value_to_self_msat_offset = 0;
1407 let mut feerate_per_kw = self.feerate_per_kw;
1408 if let Some((feerate, update_state)) = self.pending_update_fee {
1409 if match update_state {
1410 // Note that these match the inclusion criteria when scanning
1411 // pending_inbound_htlcs below.
1412 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1413 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1414 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1416 feerate_per_kw = feerate;
1420 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1421 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1422 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1424 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1426 macro_rules! get_htlc_in_commitment {
1427 ($htlc: expr, $offered: expr) => {
1428 HTLCOutputInCommitment {
1430 amount_msat: $htlc.amount_msat,
1431 cltv_expiry: $htlc.cltv_expiry,
1432 payment_hash: $htlc.payment_hash,
1433 transaction_output_index: None
1438 macro_rules! add_htlc_output {
1439 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1440 if $outbound == local { // "offered HTLC output"
1441 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1442 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1445 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1447 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1448 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1449 included_non_dust_htlcs.push((htlc_in_tx, $source));
1451 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1452 included_dust_htlcs.push((htlc_in_tx, $source));
1455 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1456 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1459 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1461 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1462 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1463 included_non_dust_htlcs.push((htlc_in_tx, $source));
1465 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1466 included_dust_htlcs.push((htlc_in_tx, $source));
1472 for ref htlc in self.pending_inbound_htlcs.iter() {
1473 let (include, state_name) = match htlc.state {
1474 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1475 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1476 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1477 InboundHTLCState::Committed => (true, "Committed"),
1478 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1482 add_htlc_output!(htlc, false, None, state_name);
1483 remote_htlc_total_msat += htlc.amount_msat;
1485 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1487 &InboundHTLCState::LocalRemoved(ref reason) => {
1488 if generated_by_local {
1489 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1490 value_to_self_msat_offset += htlc.amount_msat as i64;
1499 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1501 for ref htlc in self.pending_outbound_htlcs.iter() {
1502 let (include, state_name) = match htlc.state {
1503 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1504 OutboundHTLCState::Committed => (true, "Committed"),
1505 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1506 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1507 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1510 let preimage_opt = match htlc.state {
1511 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1512 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1513 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1517 if let Some(preimage) = preimage_opt {
1518 preimages.push(preimage);
1522 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1523 local_htlc_total_msat += htlc.amount_msat;
1525 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1527 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1528 value_to_self_msat_offset -= htlc.amount_msat as i64;
1530 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1531 if !generated_by_local {
1532 value_to_self_msat_offset -= htlc.amount_msat as i64;
1540 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1541 assert!(value_to_self_msat >= 0);
1542 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1543 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1544 // "violate" their reserve value by couting those against it. Thus, we have to convert
1545 // everything to i64 before subtracting as otherwise we can overflow.
1546 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1547 assert!(value_to_remote_msat >= 0);
1549 #[cfg(debug_assertions)]
1551 // Make sure that the to_self/to_remote is always either past the appropriate
1552 // channel_reserve *or* it is making progress towards it.
1553 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1554 self.holder_max_commitment_tx_output.lock().unwrap()
1556 self.counterparty_max_commitment_tx_output.lock().unwrap()
1558 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1559 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1560 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1561 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1564 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1565 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1566 let (value_to_self, value_to_remote) = if self.is_outbound() {
1567 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1569 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1572 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1573 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1574 let (funding_pubkey_a, funding_pubkey_b) = if local {
1575 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1577 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1580 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1581 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1586 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1587 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1592 let num_nondust_htlcs = included_non_dust_htlcs.len();
1594 let channel_parameters =
1595 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1596 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1597 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1604 &mut included_non_dust_htlcs,
1607 let mut htlcs_included = included_non_dust_htlcs;
1608 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1609 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1610 htlcs_included.append(&mut included_dust_htlcs);
1612 // For the stats, trim the value (in msat) to zero if it is below the broadcaster's dust limit.
1613 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1614 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1622 local_balance_msat: value_to_self_msat as u64,
1623 remote_balance_msat: value_to_remote_msat as u64,
1629 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1630 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to our
1631 /// counterparty!) The keys are specifically generated for the _next_ state to which the channel
1632 /// is about to advance.
1633 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1634 /// TODO Some magic rust shit to compile-time check this?
1635 fn build_next_holder_transaction_keys(&self) -> TxCreationKeys {
1636 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1637 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1638 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1639 let cur_holder_commitment_point = self.cur_holder_commitment_point
1640 .expect("Holder per-commitment point is not ready");
1642 TxCreationKeys::derive_new(
1643 &self.secp_ctx, &cur_holder_commitment_point, delayed_payment_base, htlc_basepoint,
1644 &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1648 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1649 /// will sign and send to our counterparty.
1650 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1651 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1652 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1653 //may see payments to it!
1654 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1655 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1656 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1658 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1661 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1662 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1663 /// Panics if called before accept_channel/InboundV1Channel::new
1664 pub fn get_funding_redeemscript(&self) -> Script {
1665 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1668 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1669 &self.get_counterparty_pubkeys().funding_pubkey
1672 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1676 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1677 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1678 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1679 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1680 // more dust balance if the feerate increases when we have several HTLCs pending
1681 // which are near the dust limit.
1682 let mut feerate_per_kw = self.feerate_per_kw;
1683 // If there's a pending update fee, use it to ensure we aren't under-estimating
1684 // potential feerate updates coming soon.
1685 if let Some((feerate, _)) = self.pending_update_fee {
1686 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1688 if let Some(feerate) = outbound_feerate_update {
1689 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1691 cmp::max(2530, feerate_per_kw * 1250 / 1000)
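// Editor's worked example (illustrative, not part of the original source): with a current
// feerate of 5_000 sat/kW and no pending updates, the buffered feerate is
// max(2530, 5_000 * 1250 / 1000) = 6_250 sat/kW.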
1694 /// Get forwarding information for the counterparty.
1695 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1696 self.counterparty_forwarding_info.clone()
1699 /// Returns an HTLCStats about pending inbound HTLCs.
1700 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1702 let mut stats = HTLCStats {
1703 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1704 pending_htlcs_value_msat: 0,
1705 on_counterparty_tx_dust_exposure_msat: 0,
1706 on_holder_tx_dust_exposure_msat: 0,
1707 holding_cell_msat: 0,
1708 on_holder_tx_holding_cell_htlcs_count: 0,
1711 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1714 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1715 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1716 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1718 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1719 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1720 for ref htlc in context.pending_inbound_htlcs.iter() {
1721 stats.pending_htlcs_value_msat += htlc.amount_msat;
1722 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1723 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1725 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1726 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1732 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1733 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1735 let mut stats = HTLCStats {
1736 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1737 pending_htlcs_value_msat: 0,
1738 on_counterparty_tx_dust_exposure_msat: 0,
1739 on_holder_tx_dust_exposure_msat: 0,
1740 holding_cell_msat: 0,
1741 on_holder_tx_holding_cell_htlcs_count: 0,
1744 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1747 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1748 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1749 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1751 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1752 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1753 for ref htlc in context.pending_outbound_htlcs.iter() {
1754 stats.pending_htlcs_value_msat += htlc.amount_msat;
1755 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1756 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1758 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1759 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1763 for update in context.holding_cell_htlc_updates.iter() {
1764 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1765 stats.pending_htlcs += 1;
1766 stats.pending_htlcs_value_msat += amount_msat;
1767 stats.holding_cell_msat += amount_msat;
1768 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1769 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1771 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1772 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1774 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1781 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1782 /// Doesn't bother handling the
1783 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1784 /// corner case properly.
1785 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1786 -> AvailableBalances
1787 where F::Target: FeeEstimator
1789 let context = &self;
1790 // Note that we have to handle overflow due to the above case.
1791 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1792 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1794 let mut balance_msat = context.value_to_self_msat;
1795 for ref htlc in context.pending_inbound_htlcs.iter() {
1796 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1797 balance_msat += htlc.amount_msat;
1800 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1802 let outbound_capacity_msat = context.value_to_self_msat
1803 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1805 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1807 let mut available_capacity_msat = outbound_capacity_msat;
1809 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1810 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1814 if context.is_outbound() {
1815 // We should mind channel commit tx fee when computing how much of the available capacity
1816 // can be used in the next htlc. Mirrors the logic in send_htlc.
1818 // The fee depends on whether the amount we will be sending is above dust or not,
1819 // and the answer will in turn change the amount itself, making it a circular dependency.
1821 // This complicates the computation around dust-values, up to the one-htlc-value.
1822 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1823 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1824 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1827 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1828 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1829 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1830 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1831 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1832 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1833 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1836 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1837 // value ends up being below dust, we have this fee available again. In that case,
1838 // match the value to right-below-dust.
1839 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1840 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1841 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1842 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1843 debug_assert!(one_htlc_difference_msat != 0);
1844 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1845 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1846 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1848 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1851 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1852 // sending a new HTLC won't reduce their balance below our reserve threshold.
1853 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1854 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1855 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1858 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1859 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1861 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1862 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1863 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1865 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1866 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1867 // we've selected for them, we can only send dust HTLCs.
1868 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1872 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1874 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1875 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1876 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1877 // send above the dust limit (as the router can always overpay to meet the dust limit).
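// Editor's illustrative numbers (not part of the original source): with a 100_000 msat
// dust exposure limit, 90_000 msat already counted as dust, and a 3_000_000 msat dust
// threshold, we could add at most 10_000 msat of further dust OR any HTLC of at least
// 3_000_000 msat; since one (min, max) pair cannot express both options, we advertise the
// above-dust one.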
1878 let mut remaining_msat_below_dust_exposure_limit = None;
1879 let mut dust_exposure_dust_limit_msat = 0;
1880 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1882 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1883 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1885 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1886 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1887 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1889 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1890 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
1891 remaining_msat_below_dust_exposure_limit =
1892 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1893 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1896 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1897 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat as i64 {
1898 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1899 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1900 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1901 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1904 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1905 if available_capacity_msat < dust_exposure_dust_limit_msat {
1906 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1908 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1912 available_capacity_msat = cmp::min(available_capacity_msat,
1913 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1915 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1916 available_capacity_msat = 0;
1920 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1921 - context.value_to_self_msat as i64
1922 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1923 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1925 outbound_capacity_msat,
1926 next_outbound_htlc_limit_msat: available_capacity_msat,
1927 next_outbound_htlc_minimum_msat,
1932 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1933 let context = &self;
1934 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1937 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1938 /// number of pending HTLCs that are on track to be in our next commitment tx.
1940 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1941 /// `fee_spike_buffer_htlc` is `Some`.
1943 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1944 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1946 /// Dust HTLCs are excluded.
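/// (Editor's illustrative note, not from the original documentation: passing a non-dust
/// `htlc` together with `fee_spike_buffer_htlc = Some(())` prices the commitment
/// transaction as if it carried the currently-included non-dust HTLCs plus two extra ones,
/// leaving headroom to accept one more HTLC later.)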
1947 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1948 let context = &self;
1949 assert!(context.is_outbound());
1951 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1954 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1955 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1957 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1958 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1960 let mut addl_htlcs = 0;
1961 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1963 HTLCInitiator::LocalOffered => {
1964 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1968 HTLCInitiator::RemoteOffered => {
1969 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1975 let mut included_htlcs = 0;
1976 for ref htlc in context.pending_inbound_htlcs.iter() {
1977 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1980 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1981 // transaction including this HTLC if it times out before they RAA.
1982 included_htlcs += 1;
1985 for ref htlc in context.pending_outbound_htlcs.iter() {
1986 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1990 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1991 OutboundHTLCState::Committed => included_htlcs += 1,
1992 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1993 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1994 // transaction won't be generated until they send us their next RAA, which will mean
1995 // dropping any HTLCs in this state.
2000 for htlc in context.holding_cell_htlc_updates.iter() {
2002 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2003 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2008 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2009 // ack we're guaranteed to never include them in commitment txs anymore.
2013 let num_htlcs = included_htlcs + addl_htlcs;
2014 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2015 #[cfg(any(test, fuzzing))]
2018 if fee_spike_buffer_htlc.is_some() {
2019 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2021 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2022 + context.holding_cell_htlc_updates.len();
2023 let commitment_tx_info = CommitmentTxInfoCached {
2025 total_pending_htlcs,
2026 next_holder_htlc_id: match htlc.origin {
2027 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2028 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2030 next_counterparty_htlc_id: match htlc.origin {
2031 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2032 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2034 feerate: context.feerate_per_kw,
2036 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2041 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2042 /// pending HTLCs that are on track to be in their next commitment tx
2044 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2045 /// `fee_spike_buffer_htlc` is `Some`.
2047 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2048 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2050 /// Dust HTLCs are excluded.
2051 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2052 let context = &self;
2053 assert!(!context.is_outbound());
2055 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2058 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2059 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2061 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2062 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2064 let mut addl_htlcs = 0;
2065 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2067 HTLCInitiator::LocalOffered => {
2068 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2072 HTLCInitiator::RemoteOffered => {
2073 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2079 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2080 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2081 // committed outbound HTLCs, see below.
2082 let mut included_htlcs = 0;
2083 for ref htlc in context.pending_inbound_htlcs.iter() {
2084 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2087 included_htlcs += 1;
2090 for ref htlc in context.pending_outbound_htlcs.iter() {
2091 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2094 // We only include an outbound HTLC if it will not be included in their next commitment_signed,
2095 // i.e. if they've responded to us with an RAA after announcement.
2097 OutboundHTLCState::Committed => included_htlcs += 1,
2098 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2099 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2104 let num_htlcs = included_htlcs + addl_htlcs;
2105 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2106 #[cfg(any(test, fuzzing))]
2109 if fee_spike_buffer_htlc.is_some() {
2110 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2112 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2113 let commitment_tx_info = CommitmentTxInfoCached {
2115 total_pending_htlcs,
2116 next_holder_htlc_id: match htlc.origin {
2117 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2118 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2120 next_counterparty_htlc_id: match htlc.origin {
2121 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2122 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2124 feerate: context.feerate_per_kw,
2126 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2131 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2132 where F: Fn() -> Option<O> {
2133 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2134 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2141 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2143 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2144 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2147 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2149 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2150 self.if_unbroadcasted_funding(||
2151 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2155 /// Returns whether the channel is funded in a batch.
2156 pub fn is_batch_funding(&self) -> bool {
2157 self.is_batch_funding.is_some()
2160 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2162 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2163 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2166 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2167 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2168 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2169 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2170 /// immediately (others we will have to allow to time out).
2171 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2172 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2173 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2174 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2175 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2176 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2178 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2179 // return them to fail the payment.
2180 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2181 let counterparty_node_id = self.get_counterparty_node_id();
2182 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2184 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2185 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2190 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2191 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2192 // returning a channel monitor update here would imply a channel monitor update before
2193 // we even registered the channel monitor to begin with, which is invalid.
2194 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2195 // funding transaction, don't return a funding txo (which prevents providing the
2196 // monitor update to the user, even if we return one).
2197 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2198 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2199 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2200 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2201 update_id: self.latest_monitor_update_id,
2202 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2206 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2208 self.channel_state = ChannelState::ShutdownComplete as u32;
2209 self.update_time_counter += 1;
2210 (monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid)
2213 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2214 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2215 let counterparty_keys = self.build_remote_transaction_keys();
2216 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2217 let signature = match &self.holder_signer {
2218 // TODO (taproot|arik): move match into calling method for Taproot
2219 ChannelSignerType::Ecdsa(ecdsa) => {
2220 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2221 .map(|(sig, _)| sig).ok()?
2225 if self.signer_pending_funding {
2226 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2227 self.signer_pending_funding = false;
2230 Some(msgs::FundingCreated {
2231 temporary_channel_id: self.temporary_channel_id.unwrap(),
2232 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2233 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2236 partial_signature_with_nonce: None,
2238 next_local_nonce: None,
2242 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2243 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2244 let counterparty_keys = self.build_remote_transaction_keys();
2245 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2247 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2248 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2249 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2250 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2252 match &self.holder_signer {
2253 // TODO (arik): move match into calling method for Taproot
2254 ChannelSignerType::Ecdsa(ecdsa) => {
2255 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2256 .map(|(signature, _)| msgs::FundingSigned {
2257 channel_id: self.channel_id(),
2260 partial_signature_with_nonce: None,
2264 if funding_signed.is_none() {
2265 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2266 self.signer_pending_funding = true;
2267 } else if self.signer_pending_funding {
2268 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2269 self.signer_pending_funding = false;
2272 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2273 (counterparty_initial_commitment_tx, funding_signed)
2279 // Internal utility functions for channels
2281 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2282 /// `channel_value_satoshis` in msat, set through
2283 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2285 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2287 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2288 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2289 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2291 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2294 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2296 channel_value_satoshis * 10 * configured_percent
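// Editor's worked example (illustrative, not part of the original source): for
// channel_value_satoshis = 1_000_000 and a configured 10%, the result is
// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel value expressed in msat;
// configured values below 1% or above 100% are clamped first.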
2299 /// Returns a minimum channel reserve value the remote needs to maintain,
2300 /// required by us according to the configured or default
2301 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2303 /// Guaranteed to return a value no larger than channel_value_satoshis
2305 /// This is used both for outbound and inbound channels and has a lower bound
2306 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2307 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2308 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2309 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
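// Editor's worked example (illustrative, not part of the original source): with
// channel_value_satoshis = 1_000_000 and their_channel_reserve_proportional_millionths =
// 10_000 (1%), the calculated reserve is 1_000_000 * 10_000 / 1_000_000 = 10_000 sat,
// which is then clamped to at least MIN_THEIR_CHAN_RESERVE_SATOSHIS and at most the
// channel value.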
2312 /// This is for legacy reasons, present for forward-compatibility.
2313 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2314 /// from storage. Hence, we use this function to not persist default values of
2315 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2316 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2317 let (q, _) = channel_value_satoshis.overflowing_div(100);
2318 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
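// Editor's worked example (illustrative, not part of the original source): for a
// 1_000_000 sat channel the legacy default is max(1_000_000 / 100, 1000) = 10_000 sat;
// for a 50_000 sat channel it is max(500, 1000) = 1000 sat, capped at the channel value.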
2321 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2322 // Note that num_htlcs should not include dust HTLCs.
2324 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2325 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2328 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2329 // Note that num_htlcs should not include dust HTLCs.
2330 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2331 // Note that we need to divide before multiplying to round properly,
2332 // since the lowest denomination of bitcoin on-chain is the satoshi.
2333 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
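// Editor's worked example (illustrative, assuming the usual non-anchor base weight of
// 724 WU and 172 WU per non-dust HTLC): at 2_500 sat/kW with 2 non-dust HTLCs the fee is
// (724 + 2 * 172) * 2_500 / 1000 = 2_670 sat, i.e. 2_670_000 msat from the msat variant,
// which divides before multiplying so the result is always a whole number of satoshis.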
2336 // Holder designates channel data owned for the benefit of the user client.
2337 // Counterparty designates channel data owned by the other channel participant.
2338 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2339 pub context: ChannelContext<SP>,
2342 #[cfg(any(test, fuzzing))]
2343 struct CommitmentTxInfoCached {
2345 total_pending_htlcs: usize,
2346 next_holder_htlc_id: u64,
2347 next_counterparty_htlc_id: u64,
2351 impl<SP: Deref> Channel<SP> where
2352 SP::Target: SignerProvider,
2353 <SP::Target as SignerProvider>::Signer: WriteableEcdsaChannelSigner
2355 fn check_remote_fee<F: Deref, L: Deref>(
2356 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2357 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2358 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2360 // We only bound the fee updates on the upper side to prevent completely absurd feerates,
2361 // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee.
2362 // We generally don't care too much if they set the feerate to something very high, but it
2363 // could result in the channel being useless due to everything being dust. This doesn't
2364 // apply to channels supporting anchor outputs since HTLC transactions are pre-signed with a
2365 // zero fee, so their fee is no longer considered to determine dust limits.
2366 if !channel_type.supports_anchors_zero_fee_htlc_tx() {
2368 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::MaxAllowedNonAnchorChannelRemoteFee) as u64;
2369 if feerate_per_kw as u64 > upper_limit {
2370 return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
2374 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2375 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2377 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2379 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2380 if feerate_per_kw < lower_limit {
2381 if let Some(cur_feerate) = cur_feerate_per_kw {
2382 if feerate_per_kw > cur_feerate {
2384 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2385 cur_feerate, feerate_per_kw);
2389 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
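// Editor's illustrative outcome (not part of the original source): if the estimator's
// bound for MaxAllowedNonAnchorChannelRemoteFee is 25_000 sat/kW, a non-anchor peer
// proposing 30_000 sat/kW is rejected as too high; a proposal below the relevant
// MinAllowed*ChannelRemoteFee bound is rejected as too low unless it still improves on the
// feerate we currently have.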
2395 fn get_closing_scriptpubkey(&self) -> Script {
2396 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2397 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2398 // outside of those situations will fail.
2399 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2403 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2408 1 + // script length (0)
2412 )*4 + // * 4 for non-witness parts
2413 2 + // witness marker and flag
2414 1 + // witness element count
2415 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2416 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2417 2*(1 + 71); // two signatures + sighash type flags
2418 if let Some(spk) = a_scriptpubkey {
2419 ret += ((8+1) + // output values and script length
2420 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2422 if let Some(spk) = b_scriptpubkey {
2423 ret += ((8+1) + // output values and script length
2424 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
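// Editor's worked example (illustrative, not part of the original source): each P2WPKH
// (22-byte) closing output adds (8 + 1 + 22) * 4 = 124 weight units, matching the
// "* 4 for non-witness parts" accounting above.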
2430 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2431 assert!(self.context.pending_inbound_htlcs.is_empty());
2432 assert!(self.context.pending_outbound_htlcs.is_empty());
2433 assert!(self.context.pending_update_fee.is_none());
2435 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2436 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2437 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2439 if value_to_holder < 0 {
2440 assert!(self.context.is_outbound());
2441 total_fee_satoshis += (-value_to_holder) as u64;
2442 } else if value_to_counterparty < 0 {
2443 assert!(!self.context.is_outbound());
2444 total_fee_satoshis += (-value_to_counterparty) as u64;
2447 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2448 value_to_counterparty = 0;
2451 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2452 value_to_holder = 0;
2455 assert!(self.context.shutdown_scriptpubkey.is_some());
2456 let holder_shutdown_script = self.get_closing_scriptpubkey();
2457 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2458 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2460 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2461 (closing_transaction, total_fee_satoshis)
2464 fn funding_outpoint(&self) -> OutPoint {
2465 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2468 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2471 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2472 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2474 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
2476 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2477 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2478 where L::Target: Logger {
2479 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2480 // (see equivalent if condition there).
2481 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2482 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2483 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2484 self.context.latest_monitor_update_id = mon_update_id;
2485 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2486 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2490 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2491 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2492 caller thought we could have something claimed (cause we wouldn't have accepted an
2493 incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2495 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2496 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2498 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2500 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2501 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2502 // these, but for now we just have to treat them as normal.
2504 let mut pending_idx = core::usize::MAX;
2505 let mut htlc_value_msat = 0;
2506 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2507 if htlc.htlc_id == htlc_id_arg {
2508 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).into_inner()));
2509 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2510 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2512 InboundHTLCState::Committed => {},
2513 InboundHTLCState::LocalRemoved(ref reason) => {
2514 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2516 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2517 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2519 return UpdateFulfillFetch::DuplicateClaim {};
2522 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2523 // Don't return in release mode here so that we can update channel_monitor
2527 htlc_value_msat = htlc.amount_msat;
2531 if pending_idx == core::usize::MAX {
2532 #[cfg(any(test, fuzzing))]
2533 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2534 // this is simply a duplicate claim, not previously failed and we lost funds.
2535 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2536 return UpdateFulfillFetch::DuplicateClaim {};
2539 // Now update local state:
2541 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2542 // can claim it even if the channel hits the chain before we see their next commitment.
2543 self.context.latest_monitor_update_id += 1;
2544 let monitor_update = ChannelMonitorUpdate {
2545 update_id: self.context.latest_monitor_update_id,
2546 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2547 payment_preimage: payment_preimage_arg.clone(),
2551 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2552 // Note that this condition is the same as the assertion in
2553 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2554 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2555 // do not get into this branch.
2556 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2557 match pending_update {
2558 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2559 if htlc_id_arg == htlc_id {
2560 // Make sure we don't leave latest_monitor_update_id incremented here:
2561 self.context.latest_monitor_update_id -= 1;
2562 #[cfg(any(test, fuzzing))]
2563 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2564 return UpdateFulfillFetch::DuplicateClaim {};
2567 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2568 if htlc_id_arg == htlc_id {
2569 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2570 // TODO: We may actually be able to switch to a fulfill here, though its
2571 // rare enough it may not be worth the complexity burden.
2572 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2573 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2579 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2580 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2581 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2583 #[cfg(any(test, fuzzing))]
2584 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2585 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2587 #[cfg(any(test, fuzzing))]
2588 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2591 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2592 if let InboundHTLCState::Committed = htlc.state {
2594 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2595 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2597 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2598 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2601 UpdateFulfillFetch::NewClaim {
2604 msg: Some(msgs::UpdateFulfillHTLC {
2605 channel_id: self.context.channel_id(),
2606 htlc_id: htlc_id_arg,
2607 payment_preimage: payment_preimage_arg,
2612 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2613 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2614 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2615 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2616 // Even if we aren't supposed to let new monitor updates with commitment state
2617 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2618 // matter what. Sadly, to push a new monitor update which flies before others
2619 // already queued, we have to insert it into the pending queue and update the
2620 // update_ids of all the following monitors.
2621 if release_cs_monitor && msg.is_some() {
2622 let mut additional_update = self.build_commitment_no_status_check(logger);
2623 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2624 // to be strictly increasing by one, so decrement it here.
2625 self.context.latest_monitor_update_id = monitor_update.update_id;
2626 monitor_update.updates.append(&mut additional_update.updates);
2628 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2629 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2630 monitor_update.update_id = new_mon_id;
2631 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2632 held_update.update.update_id += 1;
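// Illustrative renumbering (hypothetical ids): if the blocked updates held ids [5, 6] and this
// preimage update was provisionally assigned 7, it now carries id 5 and the blocked updates
// have been shifted to [6, 7], keeping update_ids contiguous and strictly increasing.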
2635 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2636 let update = self.build_commitment_no_status_check(logger);
2637 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2643 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2644 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2646 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2650 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2651 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2652 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2653 /// before we fail backwards.
2655 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2656 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2657 /// [`ChannelError::Ignore`].
2658 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2659 -> Result<(), ChannelError> where L::Target: Logger {
2660 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2661 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2664 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2665 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2666 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2667 /// before we fail backwards.
2669 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2670 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2671 /// [`ChannelError::Ignore`].
2672 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2673 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2674 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2675 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2677 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2679 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2680 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2681 // these, but for now we just have to treat them as normal.
2683 let mut pending_idx = core::usize::MAX;
2684 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2685 if htlc.htlc_id == htlc_id_arg {
2687 InboundHTLCState::Committed => {},
2688 InboundHTLCState::LocalRemoved(ref reason) => {
2689 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2691 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2696 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2697 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2703 if pending_idx == core::usize::MAX {
2704 #[cfg(any(test, fuzzing))]
2705 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2706 // is simply a duplicate fail, not previously failed and we failed-back too early.
2707 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2711 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2712 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2713 force_holding_cell = true;
2716 // Now update local state:
2717 if force_holding_cell {
2718 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2719 match pending_update {
2720 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2721 if htlc_id_arg == htlc_id {
2722 #[cfg(any(test, fuzzing))]
2723 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2727 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2728 if htlc_id_arg == htlc_id {
2729 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2730 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2736 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2737 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2738 htlc_id: htlc_id_arg,
2744 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2746 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2747 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2750 Ok(Some(msgs::UpdateFailHTLC {
2751 channel_id: self.context.channel_id(),
2752 htlc_id: htlc_id_arg,
2757 // Message handlers:
2759 /// Handles a funding_signed message from the remote end.
2760 /// If this call is successful, broadcast the funding transaction (and not before!)
2761 pub fn funding_signed<L: Deref>(
2762 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2763 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::Signer>, ChannelError>
2767 if !self.context.is_outbound() {
2768 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2770 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2771 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2773 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2774 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2775 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2776 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2779 let funding_script = self.context.get_funding_redeemscript();
2781 let counterparty_keys = self.context.build_remote_transaction_keys();
2782 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2783 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2784 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2786 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2787 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2789 // N.B. we'll have acquired the first per-commitment point from the signer during channel
2790 // creation. Verify that the signature from the counterparty is correct so that we've got our
2791 // signed refund transaction if we need to immediately close.
2792 let holder_signer = self.context.build_next_holder_transaction_keys();
2793 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2795 let trusted_tx = initial_commitment_tx.trust();
2796 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2797 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2798 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2799 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2800 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2804 let holder_commitment_tx = HolderCommitmentTransaction::new(
2805 initial_commitment_tx,
2808 &self.context.get_holder_pubkeys().funding_pubkey,
2809 self.context.counterparty_funding_pubkey()
2812 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2813 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2815 let funding_redeemscript = self.context.get_funding_redeemscript();
2816 let funding_txo = self.context.get_funding_txo().unwrap();
2817 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2818 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2819 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2820 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2821 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2822 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2823 shutdown_script, self.context.get_holder_selected_contest_delay(),
2824 &self.context.destination_script, (funding_txo, funding_txo_script),
2825 &self.context.channel_transaction_parameters,
2826 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2828 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2830 channel_monitor.provide_initial_counterparty_commitment_tx(
2831 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2832 self.context.cur_counterparty_commitment_transaction_number,
2833 self.context.counterparty_cur_commitment_point.unwrap(),
2834 counterparty_initial_commitment_tx.feerate_per_kw(),
2835 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2836 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
2838 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet, so there are no updates that could have failed!
2839 if self.context.is_batch_funding() {
2840 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2842 self.context.channel_state = ChannelState::FundingSent as u32;
2844 self.context.cur_holder_commitment_transaction_number -= 1;
2845 self.context.update_holder_per_commitment(logger);
2846 self.context.cur_counterparty_commitment_transaction_number -= 1;
2848 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2850 let need_channel_ready = self.check_get_channel_ready(0, logger).is_some();
2851 log_trace!(logger, "funding_signed {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
2852 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2856 /// Updates the state of the channel to indicate that all channels in the batch have received
2857 /// funding_signed and persisted their monitors.
2858 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2859 /// treated as a non-batch channel going forward.
2860 pub fn set_batch_ready(&mut self) {
2861 self.context.is_batch_funding = None;
2862 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2865 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2866 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2868 pub fn channel_ready<NS: Deref, L: Deref>(
2869 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2870 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2871 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2873 NS::Target: NodeSigner,
2876 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2877 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2878 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2881 if let Some(scid_alias) = msg.short_channel_id_alias {
2882 if Some(scid_alias) != self.context.short_channel_id {
2883 // The scid alias provided can be used to route payments *from* our counterparty,
2884 // i.e. can be used for inbound payments and provided in invoices, but is not used
2885 // when routing outbound payments.
2886 self.context.latest_inbound_scid_alias = Some(scid_alias);
2890 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2892 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2893 // batch, but we can receive channel_ready messages.
2895 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2896 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
2898 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2899 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2900 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2901 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2902 self.context.update_time_counter += 1;
2903 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2904 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2905 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2906 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2908 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2909 // required, or they're sending a fresh SCID alias.
2910 let expected_point =
2911 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2912 // If they haven't ever sent an updated point, the point they send should match
2914 self.context.counterparty_cur_commitment_point
2915 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2916 // If we've advanced the commitment number once, the second commitment point is
2917 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2918 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2919 self.context.counterparty_prev_commitment_point
2921 // If they have sent updated points, channel_ready is always supposed to match
2922 // their "first" point, which we re-derive here.
2923 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2924 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2925 ).expect("We already advanced, so previous secret keys should have been validated already")))
2927 if expected_point != Some(msg.next_per_commitment_point) {
2928 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2932 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2935 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2936 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2938 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2940 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
2943 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2944 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2945 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2946 ) -> Result<(), ChannelError>
2947 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2948 FE::Target: FeeEstimator, L::Target: Logger,
2950 // We can't accept HTLCs sent after we've sent a shutdown.
2951 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2952 if local_sent_shutdown {
2953 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2955 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2956 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2957 if remote_sent_shutdown {
2958 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2960 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2961 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2963 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2964 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2966 if msg.amount_msat == 0 {
2967 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2969 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2970 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2973 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2974 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2975 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2976 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2978 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2979 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2982 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2983 // the reserve_satoshis we told them to always have as direct payment so that they lose
2984 // something if we punish them for broadcasting an old state).
2985 // Note that we don't really care about having a small/no to_remote output in our local
2986 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2987 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2988 // present in the next commitment transaction we send them (at least for fulfilled ones,
2989 // failed ones won't modify value_to_self).
2990 // Note that we will send HTLCs which another instance of rust-lightning would think
2991 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2992 // Channel state once they will not be present in the next received commitment transaction).
2994 let mut removed_outbound_total_msat = 0;
2995 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2996 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2997 removed_outbound_total_msat += htlc.amount_msat;
2998 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2999 removed_outbound_total_msat += htlc.amount_msat;
3003 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3004 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3007 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3008 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3009 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3011 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
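// Rough illustration (assumed, non-anchor numbers): with a dust buffer feerate of 2_500 sat/kW,
// an HTLC-timeout tx of roughly 663 weight units and a counterparty dust limit of 546 sat,
// htlc_timeout_dust_limit is about 2_500 * 663 / 1000 = 1_657 sat, so any inbound HTLC below
// roughly 2_203 sat counts toward our dust exposure on the counterparty's commitment tx.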
3012 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3013 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3014 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3015 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3016 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3017 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3021 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3022 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3023 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3024 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3025 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3026 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3027 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3031 let pending_value_to_self_msat =
3032 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3033 let pending_remote_value_msat =
3034 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3035 if pending_remote_value_msat < msg.amount_msat {
3036 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3039 // Check that the remote can afford to pay for this HTLC on-chain at the current
3040 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3042 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3043 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3044 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3046 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3047 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3051 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3052 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3054 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3055 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3059 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3060 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3064 if !self.context.is_outbound() {
3065 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3066 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3067 // side, only on the sender's. Note that with anchor outputs we are no longer as
3068 // sensitive to fee spikes, so we need to account for them less.
3069 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3070 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3071 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3072 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
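// Rough illustration: for non-anchor channels the projected fee is scaled by
// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE (2 at the time of writing), so a 1_000_000 msat
// projected commitment fee is budgeted as 2_000_000 msat before checking, in the condition
// below, whether the sender can still cover it on top of their reserve.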
3074 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3075 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3076 // the HTLC, i.e. its status is already set to failing.
3077 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3078 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3081 // Check that they won't violate our local required channel reserve by adding this HTLC.
3082 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3083 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3084 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3085 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3088 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3089 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3091 if msg.cltv_expiry >= 500000000 {
3092 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3095 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3096 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3097 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3101 // Now update local state:
3102 self.context.next_counterparty_htlc_id += 1;
3103 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3104 htlc_id: msg.htlc_id,
3105 amount_msat: msg.amount_msat,
3106 payment_hash: msg.payment_hash,
3107 cltv_expiry: msg.cltv_expiry,
3108 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3113 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3115 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3116 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3117 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3118 if htlc.htlc_id == htlc_id {
3119 let outcome = match check_preimage {
3120 None => fail_reason.into(),
3121 Some(payment_preimage) => {
3122 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
3123 if payment_hash != htlc.payment_hash {
3124 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
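// In other words, a fulfill is only accepted when htlc.payment_hash == SHA256(payment_preimage);
// anything else is treated as a protocol violation and results in a ChannelError::Close.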
3126 OutboundHTLCOutcome::Success(Some(payment_preimage))
3130 OutboundHTLCState::LocalAnnounced(_) =>
3131 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3132 OutboundHTLCState::Committed => {
3133 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3135 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3136 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3141 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3144 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3145 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3146 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3148 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3149 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3152 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3155 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3156 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3157 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3159 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3160 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3163 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3167 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3168 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3169 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3171 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3172 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3175 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3179 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3180 where L::Target: Logger
3182 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3183 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3185 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3186 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3188 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3189 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3192 let funding_script = self.context.get_funding_redeemscript();
3194 let keys = self.context.build_next_holder_transaction_keys();
3196 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3197 let commitment_txid = {
3198 let trusted_tx = commitment_stats.tx.trust();
3199 let bitcoin_tx = trusted_tx.built_transaction();
3200 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3202 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3203 log_bytes!(msg.signature.serialize_compact()[..]),
3204 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3205 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3206 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3207 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3211 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3213 // If our counterparty updated the channel fee in this commitment transaction, check that
3214 // they can actually afford the new fee now.
3215 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3216 update_state == FeeUpdateState::RemoteAnnounced
3219 debug_assert!(!self.context.is_outbound());
3220 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3221 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3222 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3225 #[cfg(any(test, fuzzing))]
3227 if self.context.is_outbound() {
3228 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3229 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3230 if let Some(info) = projected_commit_tx_info {
3231 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3232 + self.context.holding_cell_htlc_updates.len();
3233 if info.total_pending_htlcs == total_pending_htlcs
3234 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3235 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3236 && info.feerate == self.context.feerate_per_kw {
3237 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3243 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3244 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3247 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3248 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3249 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3250 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3251 // backwards compatibility, we never use it in production. To provide test coverage, here,
3252 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3253 #[allow(unused_assignments, unused_mut)]
3254 let mut separate_nondust_htlc_sources = false;
3255 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3256 use core::hash::{BuildHasher, Hasher};
3257 // Get a random value using the only std API to do so - the DefaultHasher
3258 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3259 separate_nondust_htlc_sources = rand_val % 2 == 0;
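// i.e. roughly half of std test/fuzz runs exercise the new nondust_htlc_sources path while the
// other half keep the legacy duplicated-HTLC encoding, per the compatibility note above.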
3262 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3263 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3264 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3265 if let Some(_) = htlc.transaction_output_index {
3266 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3267 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3268 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3270 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3271 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3272 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3273 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3274 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.serialize()),
3275 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3276 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
3277 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3279 if !separate_nondust_htlc_sources {
3280 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3283 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3285 if separate_nondust_htlc_sources {
3286 if let Some(source) = source_opt.take() {
3287 nondust_htlc_sources.push(source);
3290 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3293 let holder_commitment_tx = HolderCommitmentTransaction::new(
3294 commitment_stats.tx,
3296 msg.htlc_signatures.clone(),
3297 &self.context.get_holder_pubkeys().funding_pubkey,
3298 self.context.counterparty_funding_pubkey()
3301 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3302 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3304 // Update state now that we've passed all the can-fail calls...
3305 let mut need_commitment = false;
3306 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3307 if *update_state == FeeUpdateState::RemoteAnnounced {
3308 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3309 need_commitment = true;
3313 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3314 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3315 Some(forward_info.clone())
3317 if let Some(forward_info) = new_forward {
3318 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3319 &htlc.payment_hash, &self.context.channel_id);
3320 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3321 need_commitment = true;
3324 let mut claimed_htlcs = Vec::new();
3325 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3326 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3327 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3328 &htlc.payment_hash, &self.context.channel_id);
3329 // Grab the preimage, if it exists, instead of cloning
3330 let mut reason = OutboundHTLCOutcome::Success(None);
3331 mem::swap(outcome, &mut reason);
3332 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3333 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3334 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3335 // have a `Success(None)` reason. In this case we could forget some HTLC
3336 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3337 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3339 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3341 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3342 need_commitment = true;
3346 self.context.latest_monitor_update_id += 1;
3347 let mut monitor_update = ChannelMonitorUpdate {
3348 update_id: self.context.latest_monitor_update_id,
3349 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3350 commitment_tx: holder_commitment_tx,
3351 htlc_outputs: htlcs_and_sigs,
3353 nondust_htlc_sources,
3357 self.context.cur_holder_commitment_transaction_number -= 1;
3358 self.context.update_holder_per_commitment(logger);
3360 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3361 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3362 log_debug!(logger, "setting resend_order to CommitmentFirst");
3363 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3365 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3366 // In case we initially failed monitor updating without requiring a response, we need
3367 // to make sure the RAA gets sent first.
3368 self.context.monitor_pending_revoke_and_ack = true;
3369 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3370 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3371 // the corresponding HTLC status updates so that
3372 // get_last_commitment_update_for_send includes the right HTLCs.
3373 self.context.monitor_pending_commitment_signed = true;
3374 let mut additional_update = self.build_commitment_no_status_check(logger);
3375 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3376 // strictly increasing by one, so decrement it here.
3377 self.context.latest_monitor_update_id = monitor_update.update_id;
3378 monitor_update.updates.append(&mut additional_update.updates);
3380 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3381 &self.context.channel_id);
3382 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3385 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3386 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3387 // we'll send one right away when we get the revoke_and_ack when we
3388 // free_holding_cell_htlcs().
3389 let mut additional_update = self.build_commitment_no_status_check(logger);
3390 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3391 // strictly increasing by one, so decrement it here.
3392 self.context.latest_monitor_update_id = monitor_update.update_id;
3393 monitor_update.updates.append(&mut additional_update.updates);
3397 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3398 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3399 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3400 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3403 /// Public version of the below, checking relevant preconditions first.
3404 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3405 /// returns `(None, Vec::new())`.
3406 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3407 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3408 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3409 where F::Target: FeeEstimator, L::Target: Logger
3411 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3412 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3413 self.free_holding_cell_htlcs(fee_estimator, logger)
3414 } else { (None, Vec::new()) }
3417 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3418 /// for our counterparty.
3419 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3420 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3421 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3422 where F::Target: FeeEstimator, L::Target: Logger
3424 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3425 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3426 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3427 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3429 let mut monitor_update = ChannelMonitorUpdate {
3430 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3431 updates: Vec::new(),
3434 let mut htlc_updates = Vec::new();
3435 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3436 let mut update_add_count = 0;
3437 let mut update_fulfill_count = 0;
3438 let mut update_fail_count = 0;
3439 let mut htlcs_to_fail = Vec::new();
3440 for htlc_update in htlc_updates.drain(..) {
3441 // Note that this *can* fail, though it should be due to rather-rare conditions on
3442 // fee races with adding too many outputs which push our total payments just over
3443 // the limit. In case it's less rare than I anticipate, we may want to revisit
3444 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3445 // to rebalance channels.
3446 match &htlc_update {
3447 &HTLCUpdateAwaitingACK::AddHTLC {
3448 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3449 skimmed_fee_msat, ..
3451 match self.send_htlc(amount_msat, *payment_hash, cltv_expiry, source.clone(),
3452 onion_routing_packet.clone(), false, skimmed_fee_msat, fee_estimator, logger)
3454 Ok(_) => update_add_count += 1,
3457 ChannelError::Ignore(ref msg) => {
3458 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3459 // If we fail to send here, then this HTLC should
3460 // be failed backwards. Failing to send here
3461 // indicates that this HTLC may keep being put back
3462 // into the holding cell without ever being
3463 // successfully forwarded/failed/fulfilled, causing
3464 // our counterparty to eventually close on us.
3465 htlcs_to_fail.push((source.clone(), *payment_hash));
3468 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3474 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3475 // If an HTLC claim was previously added to the holding cell (via
3476 // `get_update_fulfill_htlc`), then generating the claim message itself must
3477 // not fail - any in between attempts to claim the HTLC will have resulted
3478 // in it hitting the holding cell again and we cannot change the state of a
3479 // holding cell HTLC from fulfill to anything else.
3480 let mut additional_monitor_update =
3481 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3482 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3483 { monitor_update } else { unreachable!() };
3484 update_fulfill_count += 1;
3485 monitor_update.updates.append(&mut additional_monitor_update.updates);
3487 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3488 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3489 Ok(update_fail_msg_option) => {
3490 // If an HTLC failure was previously added to the holding cell (via
3491 // `queue_fail_htlc`) then generating the fail message itself must
3492 // not fail - we should never end up in a state where we double-fail
3493 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3494 // for a full revocation before failing.
3495 debug_assert!(update_fail_msg_option.is_some());
3496 update_fail_count += 1;
3499 if let ChannelError::Ignore(_) = e {}
3501 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3508 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3509 return (None, htlcs_to_fail);
3511 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3512 self.send_update_fee(feerate, false, fee_estimator, logger)
3517 let mut additional_update = self.build_commitment_no_status_check(logger);
3518 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3519 // but we want them to be strictly increasing by one, so reset it here.
3520 self.context.latest_monitor_update_id = monitor_update.update_id;
3521 monitor_update.updates.append(&mut additional_update.updates);
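// [Illustrative sketch, not part of this module; `MonitorUpdateSketch` is a hypothetical
// stand-in.] The update_id discipline described above in miniature: helper calls may each
// bump the latest monitor update id, but the single update we hand back must advance it by
// exactly one, so the extra updates are folded in and the id pinned back to the first increment.
#[allow(dead_code)]
struct MonitorUpdateSketch { update_id: u64, updates: Vec<&'static str> }
#[allow(dead_code)]
fn fold_monitor_updates_sketch(
	latest_update_id: &mut u64, mut base: MonitorUpdateSketch, mut extra: MonitorUpdateSketch,
) -> MonitorUpdateSketch {
	// Absorb the helper's updates into the one update we'll return...
	base.updates.append(&mut extra.updates);
	// ...and keep the channel's latest id strictly increasing by one.
	*latest_update_id = base.update_id;
	base
}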
3523 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3524 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3525 update_add_count, update_fulfill_count, update_fail_count);
3527 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3528 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3534 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3535 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3536 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3537 /// generating an appropriate error *after* the channel state has been updated based on the
3538 /// revoke_and_ack message.
3539 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3540 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3541 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3542 where F::Target: FeeEstimator, L::Target: Logger,
3544 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3545 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3547 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3548 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3550 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3551 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3554 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3556 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3557 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3558 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3562 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3563 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3564 // haven't given them a new commitment transaction to broadcast). We should probably
3565 // take advantage of this by updating our channel monitor, sending them an error, and
3566 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3567 // lot of work, and there's some chance this is all a misunderstanding anyway.
3568 // We have to do *something*, though, since our signer may get mad at us for otherwise
3569 // jumping a remote commitment number, so best to just force-close and move on.
3570 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3573 #[cfg(any(test, fuzzing))]
3575 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3576 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3579 match &self.context.holder_signer {
3580 ChannelSignerType::Ecdsa(ecdsa) => {
3581 ecdsa.validate_counterparty_revocation(
3582 self.context.cur_counterparty_commitment_transaction_number + 1,
3584 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3588 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3589 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
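// [Illustrative sketch; assumes the rust-bitcoin `hashes` API used by this crate.] The
// BOLT 3 per-commitment secret derivation that the compact `CounterpartyCommitmentSecrets`
// storage relies on: flipping the set bits of the index from high to low and hashing after
// each flip means a newly revealed secret can be checked against (and used to derive)
// compatible previously revealed ones, so only a small fixed number need to be stored.
#[allow(dead_code)]
fn build_commitment_secret_sketch(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
	use bitcoin::hashes::{sha256::Hash as Sha256, Hash};
	let mut res = *commitment_seed;
	for i in 0..48 {
		let bitpos = 47 - i;
		if idx & (1 << bitpos) == (1 << bitpos) {
			res[bitpos / 8] ^= 1 << (bitpos & 7);
			res = Sha256::hash(&res).into_inner();
		}
	}
	res
}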
3590 self.context.latest_monitor_update_id += 1;
3591 let mut monitor_update = ChannelMonitorUpdate {
3592 update_id: self.context.latest_monitor_update_id,
3593 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3594 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3595 secret: msg.per_commitment_secret,
3599 // Update state now that we've passed all the can-fail calls...
3600 // (note that we may still fail to generate the new commitment_signed message, but that's
3601 // OK, we step the channel here and *then* if the new generation fails we can fail the
3602 // channel based on that, but stepping stuff here should be safe either way.)
3603 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3604 self.context.sent_message_awaiting_response = None;
3605 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3606 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3607 self.context.cur_counterparty_commitment_transaction_number -= 1;
3609 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3610 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3613 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3614 let mut to_forward_infos = Vec::new();
3615 let mut revoked_htlcs = Vec::new();
3616 let mut finalized_claimed_htlcs = Vec::new();
3617 let mut update_fail_htlcs = Vec::new();
3618 let mut update_fail_malformed_htlcs = Vec::new();
3619 let mut require_commitment = false;
3620 let mut value_to_self_msat_diff: i64 = 0;
3623 // Take references explicitly so that we can hold multiple references to self.context.
3624 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3625 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3627 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3628 pending_inbound_htlcs.retain(|htlc| {
3629 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3630 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3631 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3632 value_to_self_msat_diff += htlc.amount_msat as i64;
3637 pending_outbound_htlcs.retain(|htlc| {
3638 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3639 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3640 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3641 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3643 finalized_claimed_htlcs.push(htlc.source.clone());
3644 // They fulfilled, so we sent them money
3645 value_to_self_msat_diff -= htlc.amount_msat as i64;
3650 for htlc in pending_inbound_htlcs.iter_mut() {
3651 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3653 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3657 let mut state = InboundHTLCState::Committed;
3658 mem::swap(&mut state, &mut htlc.state);
3660 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3661 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3662 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3663 require_commitment = true;
3664 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3665 match forward_info {
3666 PendingHTLCStatus::Fail(fail_msg) => {
3667 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3668 require_commitment = true;
3670 HTLCFailureMsg::Relay(msg) => {
3671 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3672 update_fail_htlcs.push(msg)
3674 HTLCFailureMsg::Malformed(msg) => {
3675 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3676 update_fail_malformed_htlcs.push(msg)
3680 PendingHTLCStatus::Forward(forward_info) => {
3681 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3682 to_forward_infos.push((forward_info, htlc.htlc_id));
3683 htlc.state = InboundHTLCState::Committed;
3689 for htlc in pending_outbound_htlcs.iter_mut() {
3690 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3691 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3692 htlc.state = OutboundHTLCState::Committed;
3694 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3695 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3696 // Grab the preimage, if it exists, instead of cloning
3697 let mut reason = OutboundHTLCOutcome::Success(None);
3698 mem::swap(outcome, &mut reason);
3699 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3700 require_commitment = true;
3704 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
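// [Illustrative sketch with hypothetical values.] The balance bookkeeping applied above:
// removed inbound HTLCs we fulfilled add to our balance, removed outbound HTLCs the peer
// fulfilled subtract from it, and the signed diff is applied to `value_to_self_msat` once.
#[allow(dead_code)]
fn apply_value_to_self_diff_sketch(
	value_to_self_msat: u64, inbound_fulfilled_msat: u64, outbound_fulfilled_msat: u64,
) -> u64 {
	let diff = inbound_fulfilled_msat as i64 - outbound_fulfilled_msat as i64;
	(value_to_self_msat as i64 + diff) as u64
}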
3706 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3707 match update_state {
3708 FeeUpdateState::Outbound => {
3709 debug_assert!(self.context.is_outbound());
3710 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3711 self.context.feerate_per_kw = feerate;
3712 self.context.pending_update_fee = None;
3714 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3715 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3716 debug_assert!(!self.context.is_outbound());
3717 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3718 require_commitment = true;
3719 self.context.feerate_per_kw = feerate;
3720 self.context.pending_update_fee = None;
3725 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3726 let release_state_str =
3727 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3728 macro_rules! return_with_htlcs_to_fail {
3729 ($htlcs_to_fail: expr) => {
3730 if !release_monitor {
3731 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3732 update: monitor_update,
3734 return Ok(($htlcs_to_fail, None));
3736 return Ok(($htlcs_to_fail, Some(monitor_update)));
3741 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3742 // We can't actually generate a new commitment transaction (incl by freeing holding
3743 // cells) while we can't update the monitor, so we just return what we have.
3744 if require_commitment {
3745 self.context.monitor_pending_commitment_signed = true;
3746 // When the monitor updating is restored we'll call
3747 // get_last_commitment_update_for_send(), which does not update state, but we're
3748 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
3750 let mut additional_update = self.build_commitment_no_status_check(logger);
3751 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3752 // strictly increasing by one, so decrement it here.
3753 self.context.latest_monitor_update_id = monitor_update.update_id;
3754 monitor_update.updates.append(&mut additional_update.updates);
3756 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3757 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3758 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3759 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3760 return_with_htlcs_to_fail!(Vec::new());
3763 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3764 (Some(mut additional_update), htlcs_to_fail) => {
3765 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3766 // strictly increasing by one, so decrement it here.
3767 self.context.latest_monitor_update_id = monitor_update.update_id;
3768 monitor_update.updates.append(&mut additional_update.updates);
3770 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3771 &self.context.channel_id(), release_state_str);
3773 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3774 return_with_htlcs_to_fail!(htlcs_to_fail);
3776 (None, htlcs_to_fail) => {
3777 if require_commitment {
3778 let mut additional_update = self.build_commitment_no_status_check(logger);
3780 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3781 // strictly increasing by one, so decrement it here.
3782 self.context.latest_monitor_update_id = monitor_update.update_id;
3783 monitor_update.updates.append(&mut additional_update.updates);
3785 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3786 &self.context.channel_id(),
3787 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3790 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3791 return_with_htlcs_to_fail!(htlcs_to_fail);
3793 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3794 &self.context.channel_id(), release_state_str);
3796 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3797 return_with_htlcs_to_fail!(htlcs_to_fail);
3803 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3804 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3805 /// commitment update.
3806 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3807 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3808 where F::Target: FeeEstimator, L::Target: Logger
3810 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3811 assert!(msg_opt.is_none(), "We forced holding cell?");
3814 /// Adds a pending update to this channel. See the doc for send_htlc for
3815 /// further details on the optionality of the return value.
3816 /// If our balance is too low to cover the cost of the next commitment transaction at the
3817 /// new feerate, the update is cancelled.
3819 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3820 /// [`Channel`] if `force_holding_cell` is false.
3821 fn send_update_fee<F: Deref, L: Deref>(
3822 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3823 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3824 ) -> Option<msgs::UpdateFee>
3825 where F::Target: FeeEstimator, L::Target: Logger
3827 if !self.context.is_outbound() {
3828 panic!("Cannot send fee from inbound channel");
3830 if !self.context.is_usable() {
3831 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3833 if !self.context.is_live() {
3834 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3837 // Before proposing a feerate update, check that we can actually afford the new fee.
3838 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3839 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3840 let keys = self.context.build_next_holder_transaction_keys();
3841 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3842 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3843 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3844 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3845 //TODO: auto-close after a number of failures?
3846 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3850 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3851 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3852 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3853 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3854 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3855 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3858 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3859 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3863 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3864 force_holding_cell = true;
3867 if force_holding_cell {
3868 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3872 debug_assert!(self.context.pending_update_fee.is_none());
3873 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3875 Some(msgs::UpdateFee {
3876 channel_id: self.context.channel_id,
3881 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3882 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be re-transmitted.
3884 /// No further message handling calls may be made until a channel_reestablish dance has completed.
3886 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3887 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3888 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3889 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3893 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3894 // While the below code should be idempotent, it's simpler to just return early, as
3895 // redundant disconnect events can fire, though they should be rare.
3899 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3900 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3903 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3904 // will be retransmitted.
3905 self.context.last_sent_closing_fee = None;
3906 self.context.pending_counterparty_closing_signed = None;
3907 self.context.closing_fee_limits = None;
3909 let mut inbound_drop_count = 0;
3910 self.context.pending_inbound_htlcs.retain(|htlc| {
3912 InboundHTLCState::RemoteAnnounced(_) => {
3913 // They sent us an update_add_htlc but we never got the commitment_signed.
3914 // We'll tell them what commitment_signed we're expecting next and they'll drop
3915 // this HTLC accordingly
3916 inbound_drop_count += 1;
3919 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3920 // We received a commitment_signed updating this HTLC and (at least hopefully)
3921 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3922 // in response to it yet, so don't touch it.
3925 InboundHTLCState::Committed => true,
3926 InboundHTLCState::LocalRemoved(_) => {
3927 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3928 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3929 // (that we missed). Keep this around for now and if they tell us they missed
3930 // the commitment_signed we can re-transmit the update then.
3935 self.context.next_counterparty_htlc_id -= inbound_drop_count;
3937 if let Some((_, update_state)) = self.context.pending_update_fee {
3938 if update_state == FeeUpdateState::RemoteAnnounced {
3939 debug_assert!(!self.context.is_outbound());
3940 self.context.pending_update_fee = None;
3944 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3945 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3946 // They sent us an update to remove this but haven't yet sent the corresponding
3947 // commitment_signed, we need to move it back to Committed and they can re-send
3948 // the update upon reconnection.
3949 htlc.state = OutboundHTLCState::Committed;
3953 self.context.sent_message_awaiting_response = None;
3955 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3956 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3960 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3961 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3962 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3963 /// update completes (potentially immediately).
3964 /// The messages which were generated with the monitor update must *not* have been sent to the
3965 /// remote end, and must instead have been dropped. They will be regenerated when
3966 /// [`Self::monitor_updating_restored`] is called.
3968 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3969 /// [`chain::Watch`]: crate::chain::Watch
3970 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3971 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3972 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3973 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3974 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3976 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3977 self.context.monitor_pending_commitment_signed |= resend_commitment;
3978 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3979 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3980 self.context.monitor_pending_failures.append(&mut pending_fails);
3981 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3982 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3985 /// Indicates that the latest ChannelMonitor update has been committed by the client
3986 /// successfully and we should restore normal operation. Returns messages which should be sent
3987 /// to the remote side.
3988 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3989 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3990 user_config: &UserConfig, best_block_height: u32
3991 ) -> MonitorRestoreUpdates
3994 NS::Target: NodeSigner
3996 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3997 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3999 // If we're past (or at) the FundingSent stage on an outbound channel, try to
4000 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4001 // first received the funding_signed.
4002 let mut funding_broadcastable =
4003 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
4004 self.context.funding_transaction.take()
4006 // That said, if the funding transaction is already confirmed (ie we're active with a
4007 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4008 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
4009 funding_broadcastable = None;
4012 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4013 // (and we assume the user never directly broadcasts the funding transaction and waits for
4014 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4015 // * an inbound channel that failed to persist the monitor on funding_created and we got
4016 // the funding transaction confirmed before the monitor was persisted, or
4017 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4018 let channel_ready = if self.context.monitor_pending_channel_ready {
4019 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4020 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4021 self.context.monitor_pending_channel_ready = false;
4022 self.get_channel_ready().or_else(|| {
4023 log_trace!(logger, "Monitor was pending channel_ready with no commitment point available; setting signer_pending_channel_ready");
4024 self.context.signer_pending_channel_ready = true;
4029 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4031 let mut accepted_htlcs = Vec::new();
4032 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4033 let mut failed_htlcs = Vec::new();
4034 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4035 let mut finalized_claimed_htlcs = Vec::new();
4036 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4038 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
4039 self.context.monitor_pending_revoke_and_ack = false;
4040 self.context.monitor_pending_commitment_signed = false;
4041 return MonitorRestoreUpdates {
4042 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4043 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4047 let raa = if self.context.monitor_pending_revoke_and_ack {
4048 self.get_last_revoke_and_ack(logger).or_else(|| {
4049 log_trace!(logger, "Monitor was pending RAA, but RAA is not available; setting signer_pending_revoke_and_ack");
4050 self.context.signer_pending_revoke_and_ack = true;
4054 let commitment_update = if self.context.monitor_pending_commitment_signed {
4055 self.get_last_commitment_update_for_send(logger).ok()
4057 if commitment_update.is_some() {
4058 self.mark_awaiting_response();
4061 if self.context.monitor_pending_commitment_signed && commitment_update.is_none() {
4062 log_trace!(logger, "Monitor was pending_commitment_signed with no commitment update available; setting signer_pending_commitment_update");
4063 self.context.signer_pending_commitment_update = true;
4065 // If the signer was pending a commitment update, but we happened to get one just now because
4066 // the monitor retrieved it, then we can mark the signer as "not pending anymore".
4067 if self.context.signer_pending_commitment_update && commitment_update.is_some() {
4068 log_trace!(logger, "Signer was pending commitment update, monitor retrieved it: clearing signer_pending_commitment_update");
4069 self.context.signer_pending_commitment_update = false;
4072 if self.context.monitor_pending_revoke_and_ack && raa.is_none() {
4073 log_trace!(logger, "Monitor was pending_revoke_and_ack with no RAA available; setting signer_pending_revoke_and_ack");
4074 self.context.signer_pending_revoke_and_ack = true;
4076 // If the signer was pending a RAA, but we happened to get one just now because the monitor
4077 // retrieved it, then we can mark the signer as "not pending anymore".
4078 if self.context.signer_pending_revoke_and_ack && raa.is_some() {
4079 log_trace!(logger, "Signer was pending RAA, monitor retrived it: clearing signer_pending_revoke_and_ack");
4080 self.context.signer_pending_revoke_and_ack = false;
4084 self.context.monitor_pending_revoke_and_ack = false;
4085 self.context.monitor_pending_commitment_signed = false;
4087 let order = self.context.resend_order.clone();
4088 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA{}",
4089 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4090 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4091 if commitment_update.is_some() && raa.is_some() {
4092 match order { RAACommitmentOrder::CommitmentFirst => ", with commitment first", RAACommitmentOrder::RevokeAndACKFirst => ", with RAA first"}
4094 MonitorRestoreUpdates {
4095 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4099 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4100 where F::Target: FeeEstimator, L::Target: Logger
4102 if self.context.is_outbound() {
4103 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4105 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4106 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4108 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4109 let feerate_over_dust_buffer = msg.feerate_per_kw > self.context.get_dust_buffer_feerate(None);
4111 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4112 self.context.update_time_counter += 1;
4113 // If the feerate has increased over the previous dust buffer (note that
4114 // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we
4115 // won't be pushed over our dust exposure limit by the feerate increase.
4116 if feerate_over_dust_buffer {
4117 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4118 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4119 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4120 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4121 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4122 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4123 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4124 msg.feerate_per_kw, holder_tx_dust_exposure)));
4126 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4127 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4128 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
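// [Illustrative, self-contained sketch assuming a non-anchor channel and the BOLT 3
// HTLC-success weight of 703; hypothetical helper, the real accounting lives in the
// pending-HTLC stats used above.] What makes an inbound HTLC on our commitment count
// towards dust exposure: its value can't cover both our dust limit and its claim
// transaction's fee at the feerate being evaluated, so it would be unrecoverable on-chain.
#[allow(dead_code)]
fn inbound_htlc_is_dust_sketch(amount_msat: u64, feerate_per_kw: u64, holder_dust_limit_sat: u64) -> bool {
	const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;
	let htlc_claim_fee_sat = feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000;
	amount_msat / 1000 < holder_dust_limit_sat + htlc_claim_fee_sat
}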
4134 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4137 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4138 log_trace!(logger, "Signing unblocked in channel {} at sequence {}",
4139 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number);
4141 if self.context.signer_pending_commitment_point || self.context.signer_pending_released_secret {
4142 log_trace!(logger, "Attempting to update holder per-commitment for pending commitment point and secret...");
4143 self.context.update_holder_per_commitment(logger);
4146 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
4147 log_trace!(logger, "Peer is disconnected; no unblocked messages to send.");
4148 return SignerResumeUpdates::default()
4151 // Make sure that we honor any ordering requirements between the commitment update and revoke-and-ack.
4152 let (commitment_update, raa) = match &self.context.resend_order {
4153 RAACommitmentOrder::CommitmentFirst => {
4154 let cu = if self.context.signer_pending_commitment_update {
4155 log_trace!(logger, "Attempting to generate pending commitment update...");
4156 self.get_last_commitment_update_for_send(logger).map(|cu| {
4157 log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update");
4158 self.context.signer_pending_commitment_update = false;
4163 let raa = if self.context.signer_pending_revoke_and_ack && !self.context.signer_pending_commitment_update {
4164 log_trace!(logger, "Attempting to generate pending RAA...");
4165 self.get_last_revoke_and_ack(logger).map(|raa| {
4166 log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack");
4167 self.context.signer_pending_revoke_and_ack = false;
4175 RAACommitmentOrder::RevokeAndACKFirst => {
4176 let raa = if self.context.signer_pending_revoke_and_ack {
4177 log_trace!(logger, "Attempting to generate pending RAA...");
4178 self.get_last_revoke_and_ack(logger).map(|raa| {
4179 log_trace!(logger, "Generated RAA; clearing signer_pending_revoke_and_ack");
4180 self.context.signer_pending_revoke_and_ack = false;
4185 let cu = if self.context.signer_pending_commitment_update && !self.context.signer_pending_revoke_and_ack {
4186 log_trace!(logger, "Attempting to generate pending commitment update...");
4187 self.get_last_commitment_update_for_send(logger).map(|cu| {
4188 log_trace!(logger, "Generated commitment update; clearing signer_pending_commitment_update");
4189 self.context.signer_pending_commitment_update = false;
4198 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4199 log_trace!(logger, "Attempting to generate pending funding signed...");
4200 self.context.get_funding_signed_msg(logger).1
4202 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4203 log_trace!(logger, "Attempting to generate pending funding created...");
4204 self.context.get_funding_created_msg(logger)
4207 // Don't yield up a `channel_ready` message if we're still pending funding.
4208 let channel_ready = if self.context.signer_pending_channel_ready && !self.context.signer_pending_funding {
4209 log_trace!(logger, "Attempting to generate pending channel ready...");
4210 self.get_channel_ready().map(|msg| {
4211 log_trace!(logger, "Generated channel_ready; clearing signer_pending_channel_ready");
4212 self.context.signer_pending_channel_ready = false;
4217 let order = self.context.resend_order.clone();
4219 log_debug!(logger, "Signing unblocked in channel {} at sequence {} resulted in {} commitment update, {} RAA{}, {} funding signed, {} funding created, {} channel ready",
4220 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
4221 if commitment_update.is_some() { "a" } else { "no" },
4222 if raa.is_some() { "an" } else { "no" },
4223 if commitment_update.is_some() && raa.is_some() {
4224 if order == RAACommitmentOrder::CommitmentFirst { " (commitment first)" } else { " (RAA first)" }
4226 if funding_signed.is_some() { "a" } else { "no" },
4227 if funding_created.is_some() { "a" } else { "no" },
4228 if channel_ready.is_some() { "a" } else { "no" });
4230 SignerResumeUpdates {
4240 fn get_last_revoke_and_ack<L: Deref>(&self, logger: &L) -> Option<msgs::RevokeAndACK> where L::Target: Logger {
4241 assert!(self.context.cur_holder_commitment_transaction_number <= INITIAL_COMMITMENT_NUMBER + 2);
4242 match (self.context.cur_holder_commitment_point, self.context.prev_holder_commitment_secret) {
4243 (Some(next_per_commitment_point), Some(per_commitment_secret)) => {
4244 log_debug!(logger, "Regenerated last revoke-and-ack in channel {} for next per-commitment point sequence number {}, releasing secret for {}",
4245 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
4246 self.context.cur_holder_commitment_transaction_number + 2);
4248 Some(msgs::RevokeAndACK {
4249 channel_id: self.context.channel_id,
4250 per_commitment_secret,
4251 next_per_commitment_point,
4253 next_local_nonce: None,
4257 (Some(_), None) => {
4258 log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the secret for {} is not available",
4259 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
4260 self.context.cur_holder_commitment_transaction_number + 2);
4264 (None, Some(_)) => {
4265 log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because the next per-commitment point is not available",
4266 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number);
4271 log_debug!(logger, "Last revoke-and-ack pending in channel {} for sequence {} because neither the next per-commitment point nor the secret for {} is available",
4272 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number,
4273 self.context.cur_holder_commitment_transaction_number + 2);
4279 fn get_channel_ready(&self) -> Option<msgs::ChannelReady> {
4280 self.context.cur_holder_commitment_point.map(|next_per_commitment_point| {
4281 msgs::ChannelReady {
4282 channel_id: self.context.channel_id(),
4283 next_per_commitment_point,
4284 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4289 /// Gets the last commitment update for immediate sending to our peer.
4290 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4291 let mut update_add_htlcs = Vec::new();
4292 let mut update_fulfill_htlcs = Vec::new();
4293 let mut update_fail_htlcs = Vec::new();
4294 let mut update_fail_malformed_htlcs = Vec::new();
4296 for htlc in self.context.pending_outbound_htlcs.iter() {
4297 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4298 update_add_htlcs.push(msgs::UpdateAddHTLC {
4299 channel_id: self.context.channel_id(),
4300 htlc_id: htlc.htlc_id,
4301 amount_msat: htlc.amount_msat,
4302 payment_hash: htlc.payment_hash,
4303 cltv_expiry: htlc.cltv_expiry,
4304 onion_routing_packet: (**onion_packet).clone(),
4305 skimmed_fee_msat: htlc.skimmed_fee_msat,
4310 for htlc in self.context.pending_inbound_htlcs.iter() {
4311 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4313 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4314 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4315 channel_id: self.context.channel_id(),
4316 htlc_id: htlc.htlc_id,
4317 reason: err_packet.clone()
4320 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4321 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4322 channel_id: self.context.channel_id(),
4323 htlc_id: htlc.htlc_id,
4324 sha256_of_onion: sha256_of_onion.clone(),
4325 failure_code: failure_code.clone(),
4328 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4329 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4330 channel_id: self.context.channel_id(),
4331 htlc_id: htlc.htlc_id,
4332 payment_preimage: payment_preimage.clone(),
4339 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4340 Some(msgs::UpdateFee {
4341 channel_id: self.context.channel_id(),
4342 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4346 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4347 if self.context.signer_pending_commitment_update {
4348 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4349 self.context.signer_pending_commitment_update = false;
4353 if !self.context.signer_pending_commitment_update {
4354 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4355 self.context.signer_pending_commitment_update = true;
4359 log_debug!(logger, "Regenerated latest commitment update in channel {} at {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4360 &self.context.channel_id(), self.context.cur_holder_commitment_transaction_number, if update_fee.is_some() { " update_fee," } else { "" },
4361 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4362 Ok(msgs::CommitmentUpdate {
4363 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4368 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4369 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4370 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4371 assert!(self.context.shutdown_scriptpubkey.is_some());
4372 Some(msgs::Shutdown {
4373 channel_id: self.context.channel_id,
4374 scriptpubkey: self.get_closing_scriptpubkey(),
4379 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4380 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4382 /// Some links printed in log lines are included here to check them during build (when run with
4383 /// `cargo doc --document-private-items`):
4384 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4385 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4386 pub fn channel_reestablish<L: Deref, NS: Deref>(
4387 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4388 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4389 ) -> Result<ReestablishResponses, ChannelError>
4392 NS::Target: NodeSigner
4394 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4395 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4396 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4397 // just close here instead of trying to recover.
4398 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4401 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4402 msg.next_local_commitment_number == 0 {
4403 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4406 if msg.next_remote_commitment_number > 0 {
4407 // TODO(waterson): figure out how to do this verification when an async signer is provided
4408 // with a (more or less) arbitrary state index. Should we require that an async signer cache
4409 // old points? Or should we make it so that we can restart the re-establish after the signer
4410 // becomes unblocked? Or something else?
4412 let state_index = INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1;
4413 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(state_index, &self.context.secp_ctx)
4414 .map_err(|_| ChannelError::Close(format!("Unable to retrieve per-commitment point for state {}", state_index)))?;
4415 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4416 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4417 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4418 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4421 if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4422 macro_rules! log_and_panic {
4423 ($err_msg: expr) => {
4424 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4425 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4428 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4429 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4430 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4431 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4432 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4433 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4434 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4435 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4439 // Before we change the state of the channel, we check if the peer is sending a very old
4440 // commitment transaction number; if so, we send a warning message.
4441 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
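// [Illustrative sketch with hypothetical numbers, encoding the staleness check just below.]
// If we have signed many commitments but the peer says the next commitment it expects from
// us is far behind that count (e.g. 3 + 1 < 10), it is running on a very old view of the
// channel and we reply with a warning rather than proceeding.
#[allow(dead_code)]
fn peer_view_is_very_old_sketch(next_remote_commitment_number: u64, our_commitment_transaction: u64) -> bool {
	next_remote_commitment_number + 1 < our_commitment_transaction
}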
4442 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4444 ChannelError::Warn(format!("Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)", msg.next_remote_commitment_number, our_commitment_transaction))
4448 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4449 // remaining cases either succeed or ErrorMessage-fail).
4450 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4451 self.context.sent_message_awaiting_response = None;
4453 let shutdown_msg = self.get_outbound_shutdown();
4455 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4457 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4458 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4459 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4460 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4461 if msg.next_remote_commitment_number != 0 {
4462 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4464 // Short circuit the whole handler as there is nothing we can resend them
4465 return Ok(ReestablishResponses {
4466 channel_ready: None,
4467 raa: None, commitment_update: None,
4468 order: RAACommitmentOrder::CommitmentFirst,
4469 shutdown_msg, announcement_sigs,
4473 // We have OurChannelReady set!
4474 let channel_ready = self.get_channel_ready();
4475 if channel_ready.is_none() {
4476 log_trace!(logger, "Could not generate channel_ready during channel_reestablish; setting signer_pending_channel_ready");
4477 self.context.signer_pending_channel_ready = true;
4480 return Ok(ReestablishResponses {
4482 raa: None, commitment_update: None,
4483 order: RAACommitmentOrder::CommitmentFirst,
4484 shutdown_msg, announcement_sigs,
4488 let required_revoke = if msg.next_remote_commitment_number + 1 == INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number {
4489 // Remote isn't waiting on any RevokeAndACK from us!
4490 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4492 } else if msg.next_remote_commitment_number + 1 == (INITIAL_COMMITMENT_NUMBER - 1) - self.context.cur_holder_commitment_transaction_number {
4493 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4494 self.context.monitor_pending_revoke_and_ack = true;
4497 self.get_last_revoke_and_ack(logger).map(|raa| {
4498 if self.context.signer_pending_revoke_and_ack {
4499 log_trace!(logger, "Generated RAA for channel_reestablish; clearing signer_pending_revoke_and_ack");
4500 self.context.signer_pending_revoke_and_ack = false;
4504 if !self.context.signer_pending_revoke_and_ack {
4505 log_trace!(logger, "Unable to generate RAA for channel_reestablish; setting signer_pending_revoke_and_ack");
4506 self.context.signer_pending_revoke_and_ack = true;
4512 return Err(ChannelError::Close("Peer attempted to reestablish channel with a very old local commitment transaction".to_owned()));
4515 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4516 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4517 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4518 // the corresponding revoke_and_ack back yet.
4519 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4520 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4521 self.mark_awaiting_response();
4523 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
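// [Illustrative sketch with hypothetical values.] Commitment numbers count down from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1), so the value compared against the peer's
// `next_local_commitment_number` grows by one for each commitment we've handed them, plus
// one more for a commitment_signed that is still awaiting its revoke_and_ack.
#[allow(dead_code)]
fn next_counterparty_commitment_number_sketch() {
	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
	// Say two commitments have been fully exchanged and a third is in flight.
	let cur_counterparty_commitment_transaction_number = INITIAL_COMMITMENT_NUMBER - 2;
	let is_awaiting_remote_revoke = true;
	let next = INITIAL_COMMITMENT_NUMBER - cur_counterparty_commitment_transaction_number
		+ if is_awaiting_remote_revoke { 1 } else { 0 };
	assert_eq!(next, 3);
}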
4525 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4526 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4527 log_debug!(logger, "Reconnecting channel at state 1, (re?)sending channel_ready");
4528 self.get_channel_ready().or_else(|| {
4529 if !self.context.signer_pending_channel_ready {
4530 log_trace!(logger, "Unable to generate channel_ready for channel_reestablish; setting signer_pending_channel_ready");
4531 self.context.signer_pending_channel_ready = true;
4537 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4538 if required_revoke.is_some() {
4539 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4541 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4544 Ok(ReestablishResponses {
4545 channel_ready, shutdown_msg, announcement_sigs,
4546 raa: required_revoke,
4547 commitment_update: None,
4548 order: self.context.resend_order.clone(),
4550 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4551 if required_revoke.is_some() {
4552 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4554 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4557 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4558 self.context.monitor_pending_commitment_signed = true;
4559 Ok(ReestablishResponses {
4560 channel_ready, shutdown_msg, announcement_sigs,
4561 commitment_update: None, raa: None,
4562 order: self.context.resend_order.clone(),
4565 Ok(ReestablishResponses {
4566 channel_ready, shutdown_msg, announcement_sigs,
4567 raa: required_revoke,
4568 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4569 order: self.context.resend_order.clone(),
4573 Err(ChannelError::Close("Peer attempted to reestablish channel with a very old remote commitment transaction".to_owned()))
4577 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4578 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4579 /// at which point they will be recalculated.
4580 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4582 where F::Target: FeeEstimator
4584 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4586 // Propose a range from our ChannelCloseMinimum feerate estimate to our NonAnchorChannelFee estimate plus our
4587 // force_close_avoidance_max_fee_satoshis.
4588 // If we fail to come to consensus, we'll have to force-close.
4589 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4590 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4591 // that we don't expect to need fee bumping
4592 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4593 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4595 // The spec requires that (when the channel does not have anchors) we only send absolute
4596 // channel fees no greater than the absolute channel fee on the current commitment
4597 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4598 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4599 // some force-closure by old nodes, but we wanted to close the channel anyway.
4601 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4602 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4603 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4604 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4607 // Note that technically we could end up with a lower minimum fee if one side's balance is
4608 // below our dust limit, causing the output to disappear. We don't bother handling this
4609 // case, however, as this should only happen if a channel is closed before any (material)
4610 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4611 // come to consensus with our counterparty on appropriate fees, however it should be a
4612 // relatively rare case. We can revisit this later, though note that in order to determine
4613 // if the funder's output is dust we have to know the absolute fee we're going to use.
4614 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4615 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4616 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4617 // We always add force_close_avoidance_max_fee_satoshis to our normal
4618 // feerate-calculated fee, but allow the max to be overridden if we're using a
4619 // target feerate-calculated fee.
4620 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4621 proposed_max_feerate as u64 * tx_weight / 1000)
4623 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
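// When we're not the funder, (value_to_self_msat + 999) / 1000 rounds our own balance up to
// whole sats, so this cap is simply the counterparty's (the funder's) entire balance: we
// never bound their closing fee below what they could claim anyway.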
4626 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4627 self.context.closing_fee_limits.clone().unwrap()
4630 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4631 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4632 /// this point if we're the funder we should send the initial closing_signed, and in any case
4633 /// shutdown should complete within a reasonable timeframe.
4634 fn closing_negotiation_ready(&self) -> bool {
4635 self.context.closing_negotiation_ready()
4638 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4639 /// an Err if no progress is being made and the channel should be force-closed instead.
4640 /// Should be called on a one-minute timer.
4641 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4642 if self.closing_negotiation_ready() {
4643 if self.context.closing_signed_in_flight {
4644 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4646 self.context.closing_signed_in_flight = true;
4652 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4653 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4654 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4655 where F::Target: FeeEstimator, L::Target: Logger
4657 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4658 return Ok((None, None));
4661 if !self.context.is_outbound() {
4662 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4663 return self.closing_signed(fee_estimator, &msg);
4665 return Ok((None, None));
4668 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4670 assert!(self.context.shutdown_scriptpubkey.is_some());
4671 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4672 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4673 our_min_fee, our_max_fee, total_fee_satoshis);
4675 match &self.context.holder_signer {
4676 ChannelSignerType::Ecdsa(ecdsa) => {
4678 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4679 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4681 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4682 Ok((Some(msgs::ClosingSigned {
4683 channel_id: self.context.channel_id,
4684 fee_satoshis: total_fee_satoshis,
4686 fee_range: Some(msgs::ClosingSignedFeeRange {
4687 min_fee_satoshis: our_min_fee,
4688 max_fee_satoshis: our_max_fee,
4695 // Marks a channel as waiting for a response from the counterparty. If it's not received
4696 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnection.
4698 fn mark_awaiting_response(&mut self) {
4699 self.context.sent_message_awaiting_response = Some(0);
4702 /// Determines whether we should disconnect the counterparty due to not receiving a response
4703 /// within our expected timeframe.
4705 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4706 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4707 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4710 // Don't disconnect when we're not waiting on a response.
4713 *ticks_elapsed += 1;
4714 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4718 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4719 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4721 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4722 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4724 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4725 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4726 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4727 // can do that via error message without getting a connection fail anyway...
4728 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4730 for htlc in self.context.pending_inbound_htlcs.iter() {
4731 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4732 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4735 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4737 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4738 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
4741 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4742 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4743 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_bytes().to_hex())));
4746 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4749 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4750 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4751 // any further commitment updates after we set LocalShutdownSent.
4752 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4754 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4757 assert!(send_shutdown);
4758 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4759 Ok(scriptpubkey) => scriptpubkey,
4760 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4762 if !shutdown_scriptpubkey.is_compatible(their_features) {
4763 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4765 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4770 // From here on out, we may not fail!
4772 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4773 self.context.update_time_counter += 1;
4775 let monitor_update = if update_shutdown_script {
4776 self.context.latest_monitor_update_id += 1;
4777 let monitor_update = ChannelMonitorUpdate {
4778 update_id: self.context.latest_monitor_update_id,
4779 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4780 scriptpubkey: self.get_closing_scriptpubkey(),
4783 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4784 self.push_ret_blockable_mon_update(monitor_update)
4786 let shutdown = if send_shutdown {
4787 Some(msgs::Shutdown {
4788 channel_id: self.context.channel_id,
4789 scriptpubkey: self.get_closing_scriptpubkey(),
4793 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4794 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4795 // cell HTLCs and return them to fail the payment.
4796 self.context.holding_cell_update_fee = None;
4797 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4798 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4800 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4801 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4808 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4809 self.context.update_time_counter += 1;
4811 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4814 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4815 let mut tx = closing_tx.trust().built_transaction().clone();
4817 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4819 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4820 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4821 let mut holder_sig = sig.serialize_der().to_vec();
4822 holder_sig.push(EcdsaSighashType::All as u8);
4823 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4824 cp_sig.push(EcdsaSighashType::All as u8);
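// Per BOLT 3, the two signatures must appear in the witness in the same order as the
// corresponding pubkeys appear in the 2-of-2 funding redeemscript, which places the
// lexicographically smaller serialized key first; hence the byte-wise comparison below.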
4825 if funding_key[..] < counterparty_funding_key[..] {
4826 tx.input[0].witness.push(holder_sig);
4827 tx.input[0].witness.push(cp_sig);
4829 tx.input[0].witness.push(cp_sig);
4830 tx.input[0].witness.push(holder_sig);
4833 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4837 pub fn closing_signed<F: Deref>(
4838 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4839 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>), ChannelError>
4840 where F::Target: FeeEstimator
4842 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4843 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4845 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4846 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4848 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4849 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4851 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4852 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4855 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4856 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4859 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4860 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4861 return Ok((None, None));
4864 let funding_redeemscript = self.context.get_funding_redeemscript();
4865 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4866 if used_total_fee != msg.fee_satoshis {
4867 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4869 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4871 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4874 // The remote end may have decided to revoke their output due to inconsistent dust
4875 // limits, so check for that case by re-checking the signature here.
4876 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4877 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4878 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4882 for outp in closing_tx.trust().built_transaction().output.iter() {
4883 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4884 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4888 assert!(self.context.shutdown_scriptpubkey.is_some());
4889 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4890 if last_fee == msg.fee_satoshis {
4891 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4892 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4893 self.context.update_time_counter += 1;
4894 return Ok((None, Some(tx)));
4898 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4900 macro_rules! propose_fee {
4901 ($new_fee: expr) => {
4902 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4903 (closing_tx, $new_fee)
4905 self.build_closing_transaction($new_fee, false)
4908 return match &self.context.holder_signer {
4909 ChannelSignerType::Ecdsa(ecdsa) => {
4911 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4912 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4914 let signed_tx = if $new_fee == msg.fee_satoshis {
4915 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4916 self.context.update_time_counter += 1;
4917 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4921 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4922 Ok((Some(msgs::ClosingSigned {
4923 channel_id: self.context.channel_id,
4924 fee_satoshis: used_fee,
4926 fee_range: Some(msgs::ClosingSignedFeeRange {
4927 min_fee_satoshis: our_min_fee,
4928 max_fee_satoshis: our_max_fee,
4936 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4937 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4938 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4940 if max_fee_satoshis < our_min_fee {
4941 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4943 if min_fee_satoshis > our_max_fee {
4944 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4947 if !self.context.is_outbound() {
4948 // They have to pay, so pick the highest fee in the overlapping range.
4949 // We should never set an upper bound aside from their full balance
4950 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4951 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4953 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4954 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4955 msg.fee_satoshis, our_min_fee, our_max_fee)));
4957 // The proposed fee is in our acceptable range, accept it and broadcast!
4958 propose_fee!(msg.fee_satoshis);
4961 // Old fee style negotiation. We don't bother to enforce whether they are complying
4962 // with the "making progress" requirements, we just comply and hope for the best.
4963 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4964 if msg.fee_satoshis > last_fee {
4965 if msg.fee_satoshis < our_max_fee {
4966 propose_fee!(msg.fee_satoshis);
4967 } else if last_fee < our_max_fee {
4968 propose_fee!(our_max_fee);
4970 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4973 if msg.fee_satoshis > our_min_fee {
4974 propose_fee!(msg.fee_satoshis);
4975 } else if last_fee > our_min_fee {
4976 propose_fee!(our_min_fee);
4978 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4982 if msg.fee_satoshis < our_min_fee {
4983 propose_fee!(our_min_fee);
4984 } else if msg.fee_satoshis > our_max_fee {
4985 propose_fee!(our_max_fee);
4987 propose_fee!(msg.fee_satoshis);
4993 fn internal_htlc_satisfies_config(
4994 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4995 ) -> Result<(), (&'static str, u16)> {
4996 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4997 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
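// Worked example with illustrative config values: forwarding 100_000 msat with a
// proportional fee of 1_000 ppm and a base fee of 1_000 msat requires
// fee = 100_000 * 1_000 / 1_000_000 + 1_000 = 1_100 msat, so the inbound HTLC must carry
// at least amt_to_forward + 1_100 msat to pass the check below.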
4998 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4999 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5001 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5002 0x1000 | 12, // fee_insufficient
5005 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5007 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5008 0x1000 | 13, // incorrect_cltv_expiry
5014 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5015 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5016 /// unsuccessful, falls back to the previous one if one exists.
5017 pub fn htlc_satisfies_config(
5018 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5019 ) -> Result<(), (&'static str, u16)> {
5020 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5022 if let Some(prev_config) = self.context.prev_config() {
5023 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5030 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5031 self.context.cur_holder_commitment_transaction_number + 1
5034 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5035 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
5038 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5039 self.context.cur_counterparty_commitment_transaction_number + 2
5043 pub fn get_signer(&self) -> &ChannelSignerType<<SP::Target as SignerProvider>::Signer> {
5044 &self.context.holder_signer
5048 pub fn get_value_stat(&self) -> ChannelValueStat {
5050 value_to_self_msat: self.context.value_to_self_msat,
5051 channel_value_msat: self.context.channel_value_satoshis * 1000,
5052 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5053 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5054 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5055 holding_cell_outbound_amount_msat: {
5057 for h in self.context.holding_cell_htlc_updates.iter() {
5059 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5067 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5068 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5072 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5073 /// Allowed in any state (including after shutdown)
5074 pub fn is_awaiting_monitor_update(&self) -> bool {
5075 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
5078 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5079 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5080 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5081 self.context.blocked_monitor_updates[0].update.update_id - 1
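// For example, if updates with ids 7 and 8 are currently blocked, every update through
// id 6 has already been released, so we report 6 (the first blocked id minus one).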
5084 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5085 /// further blocked monitor update exists after the next.
5086 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5087 if self.context.blocked_monitor_updates.is_empty() { return None; }
5088 Some((self.context.blocked_monitor_updates.remove(0).update,
5089 !self.context.blocked_monitor_updates.is_empty()))
5092 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5093 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5094 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5095 -> Option<ChannelMonitorUpdate> {
5096 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5097 if !release_monitor {
5098 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5107 pub fn blocked_monitor_updates_pending(&self) -> usize {
5108 self.context.blocked_monitor_updates.len()
5111 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5112 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5113 /// transaction. If the channel is inbound, this implies simply that the channel has not
5115 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5116 if !self.is_awaiting_monitor_update() { return false; }
5117 if self.context.channel_state &
5118 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
5119 == ChannelState::FundingSent as u32 {
5120 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5121 // FundingSent set, though our peer could have sent their channel_ready.
5122 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5125 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5126 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5127 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
5128 // waiting for the initial monitor persistence. Thus, we check if our commitment
5129 // transaction numbers have both been iterated only exactly once (for the
5130 // funding_signed), and we're awaiting monitor update.
5132 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5133 // only way to get an awaiting-monitor-update state during initial funding is if the
5134 // initial monitor persistence is still pending).
5136 // Because deciding we're awaiting initial broadcast spuriously could result in
5137 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5138 // we hard-assert here, even in production builds.
5139 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5140 assert!(self.context.monitor_pending_channel_ready);
5141 assert_eq!(self.context.latest_monitor_update_id, 0);
5147 /// Returns true if our channel_ready has been sent
5148 pub fn is_our_channel_ready(&self) -> bool {
5149 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
5152 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5153 pub fn received_shutdown(&self) -> bool {
5154 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
5157 /// Returns true if we either initiated or agreed to shut down the channel.
5158 pub fn sent_shutdown(&self) -> bool {
5159 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
5162 /// Returns true if this channel is fully shut down. True here implies that no further actions
5163 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5164 /// will be handled appropriately by the chain monitor.
5165 pub fn is_shutdown(&self) -> bool {
5166 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
5167 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
5172 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5173 self.context.channel_update_status
5176 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5177 self.context.update_time_counter += 1;
5178 self.context.channel_update_status = status;
5181 fn check_get_channel_ready<L: Deref>(&mut self, height: u32, logger: &L) -> Option<msgs::ChannelReady>
5182 where L::Target: Logger
5185 // * always when a new block/transactions are confirmed with the new height
5186 // * when funding is signed with a height of 0
5187 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5191 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
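// e.g. if the funding transaction confirmed at height 98 and the current height is 100,
// it has 100 - 98 + 1 = 3 confirmations.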
5192 if funding_tx_confirmations <= 0 {
5193 self.context.funding_tx_confirmation_height = 0;
5196 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5200 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5201 // channel_ready until the entire batch is ready.
5202 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5203 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5204 self.context.channel_state |= ChannelState::OurChannelReady as u32;
5206 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5207 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5208 self.context.update_time_counter += 1;
5210 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5211 // We got a reorg but not enough to trigger a force close, just ignore.
5214 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
5215 // We should never see a funding transaction on-chain until we've received
5216 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5217 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5218 // however, may do this and we shouldn't treat it as a bug.
5219 #[cfg(not(fuzzing))]
5220 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5221 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5222 self.context.channel_state);
5224 // We got a reorg but not enough to trigger a force close, just ignore.
5228 // If we don't need a commitment update, then we don't need a channel_ready.
5229 if !need_commitment_update {
5233 // If a monitor update is in progress, flag that we're pending a channel ready from the monitor.
5234 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
5235 log_trace!(logger, "Monitor update in progress; setting monitor_pending_channel_ready");
5236 self.context.monitor_pending_channel_ready = true;
5240 // If the peer is disconnected, then we'll worry about sending channel_ready as part of the
5241 // reconnection process.
5242 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
5243 log_trace!(logger, "Peer is disconnected; we'll deal with channel_ready on reconnect");
5247 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5248 // channel_ready yet.
5249 if self.context.signer_pending_funding {
5250 log_trace!(logger, "Awaiting signer funding; setting signer_pending_channel_ready");
5251 self.context.signer_pending_channel_ready = true;
5255 // If we're able to get the next per-commitment point from the signer, then return a
5257 let res = self.context.holder_signer.as_ref().get_per_commitment_point(
5258 INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
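// channel_ready carries the per-commitment point for the *second* commitment transaction
// (protocol-level commitment number 1), which in our decrementing numbering scheme is
// index INITIAL_COMMITMENT_NUMBER - 1.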
5260 if let Ok(next_per_commitment_point) = res {
5261 Some(msgs::ChannelReady {
5262 channel_id: self.context.channel_id,
5263 next_per_commitment_point,
5264 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5267 // Otherwise, mark us as awaiting the signer to send the channel ready.
5268 log_trace!(logger, "Awaiting signer to generate next per_commitment_point; setting signer_pending_channel_ready");
5269 self.context.signer_pending_channel_ready = true;
5274 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5275 /// In the first case, we store the confirmation height and calculate the short channel id.
5276 /// In the second, we simply return an Err indicating we need to be force-closed now.
5277 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5278 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5279 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5280 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5282 NS::Target: NodeSigner,
5285 let mut msgs = (None, None);
5286 if let Some(funding_txo) = self.context.get_funding_txo() {
5287 for &(index_in_block, tx) in txdata.iter() {
5288 // Check if the transaction is the expected funding transaction, and if it is,
5289 // check that it pays the right amount to the right script.
5290 if self.context.funding_tx_confirmation_height == 0 {
5291 if tx.txid() == funding_txo.txid {
5292 let txo_idx = funding_txo.index as usize;
5293 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5294 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5295 if self.context.is_outbound() {
5296 // If we generated the funding transaction and it doesn't match what it
5297 // should, the client is really broken and we should just panic and
5298 // tell them off. That said, because hash collisions happen with high
5299 // probability in fuzzing mode, if we're fuzzing we just close the
5300 // channel and move on.
5301 #[cfg(not(fuzzing))]
5302 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5304 self.context.update_time_counter += 1;
5305 let err_reason = "funding tx had wrong script/value or output index";
5306 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5308 if self.context.is_outbound() {
5309 if !tx.is_coin_base() {
5310 for input in tx.input.iter() {
5311 if input.witness.is_empty() {
5312 // We generated a malleable funding transaction, implying we've
5313 // just exposed ourselves to funds loss to our counterparty.
5314 #[cfg(not(fuzzing))]
5315 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5320 self.context.funding_tx_confirmation_height = height;
5321 self.context.funding_tx_confirmed_in = Some(*block_hash);
5322 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5323 Ok(scid) => Some(scid),
5324 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
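// For reference, BOLT 7 packs the short channel id into 64 bits as
// (block_height << 40) | (tx_index_in_block << 16) | funding_output_index, which is
// what scid_from_parts assembles from (height, index_in_block, txo_idx) above.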
5327 // If this is a coinbase transaction and not a 0-conf channel
5328 // we should update our min_depth to 100 to handle coinbase maturity
5329 if tx.is_coin_base() &&
5330 self.context.minimum_depth.unwrap_or(0) > 0 &&
5331 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5332 self.context.minimum_depth = Some(COINBASE_MATURITY);
5335 // If we allow 1-conf funding, we may need to check for channel_ready here and
5336 // send it immediately instead of waiting for a best_block_updated call (which
5337 // may have already happened for this block).
5338 if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
5339 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5340 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5341 msgs = (Some(channel_ready), announcement_sigs);
5344 for inp in tx.input.iter() {
5345 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5346 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5347 return Err(ClosureReason::CommitmentTxConfirmed);
5355 /// When a new block is connected, we check the height of the block against outbound holding
5356 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5357 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5358 /// handled by the ChannelMonitor.
5360 /// If we return Err, the channel may have been closed, at which point the standard
5361 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5364 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5366 pub fn best_block_updated<NS: Deref, L: Deref>(
5367 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5368 node_signer: &NS, user_config: &UserConfig, logger: &L
5369 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5371 NS::Target: NodeSigner,
5374 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5377 fn do_best_block_updated<NS: Deref, L: Deref>(
5378 &mut self, height: u32, highest_header_time: u32,
5379 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5380 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5382 NS::Target: NodeSigner,
5385 let mut timed_out_htlcs = Vec::new();
5386 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5387 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5389 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5390 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5392 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5393 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5394 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5402 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5404 if let Some(channel_ready) = self.check_get_channel_ready(height, logger) {
5405 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5406 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5408 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5409 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5412 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5413 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5414 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5415 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5416 if self.context.funding_tx_confirmation_height == 0 {
5417 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5418 // zero if it has been reorged out, however in either case, our state flags
5419 // indicate we've already sent a channel_ready
5420 funding_tx_confirmations = 0;
5423 // If we've sent channel_ready (or have both sent and received channel_ready), and
5424 // the funding transaction has become unconfirmed,
5425 // close the channel and hope we can get the latest state on chain (because presumably
5426 // the funding transaction is at least still in the mempool of most nodes).
5428 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5429 // 0-conf channel, but not doing so may lead to the
5430 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5432 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5433 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5434 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5435 return Err(ClosureReason::ProcessingError { err: err_reason });
5437 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5438 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5439 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5440 // If funding_tx_confirmed_in is unset, the channel must not be active
5441 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5442 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5443 return Err(ClosureReason::FundingTimedOut);
5446 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5447 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5449 Ok((None, timed_out_htlcs, announcement_sigs))
5452 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5453 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5454 /// before the channel has reached channel_ready and we can just wait for more blocks.
5455 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5456 if self.context.funding_tx_confirmation_height != 0 {
5457 // We handle the funding disconnection by calling best_block_updated with a height one
5458 // below where our funding was connected, implying a reorg back to conf_height - 1.
5459 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5460 // We use the time field to bump the current time we set on channel updates if it's
5461 // larger. If we don't know that time has moved forward, we can just set it to the last
5462 // time we saw and it will be ignored.
5463 let best_time = self.context.update_time_counter;
5464 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5465 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5466 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5467 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5468 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5474 // We never learned about the funding confirmation anyway, just ignore
5479 // Methods to get unprompted messages to send to the remote end (or where we already returned
5480 // something in the handler for the message that prompted this message):
5482 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5483 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5484 /// directions). Should be used for both broadcasted announcements and in response to an
5485 /// AnnouncementSignatures message from the remote peer.
5487 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5490 /// This will only return ChannelError::Ignore upon failure.
5492 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5493 fn get_channel_announcement<NS: Deref>(
5494 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5495 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5496 if !self.context.config.announced_channel {
5497 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5499 if !self.context.is_usable() {
5500 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5503 let short_channel_id = self.context.get_short_channel_id()
5504 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5505 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5506 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5507 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5508 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5510 let msg = msgs::UnsignedChannelAnnouncement {
5511 features: channelmanager::provided_channel_features(&user_config),
5514 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5515 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5516 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5517 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5518 excess_data: Vec::new(),
5524 fn get_announcement_sigs<NS: Deref, L: Deref>(
5525 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5526 best_block_height: u32, logger: &L
5527 ) -> Option<msgs::AnnouncementSignatures>
5529 NS::Target: NodeSigner,
5532 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
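// We require at least six confirmations of the funding transaction before announcing the
// channel (per BOLT 7); `confirmation_height + 5 > best_block_height` means we have fewer
// than six, so we bail out here.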
5536 if !self.context.is_usable() {
5540 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5541 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5545 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5549 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5550 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5553 log_trace!(logger, "{:?}", e);
5557 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5559 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5564 match &self.context.holder_signer {
5565 ChannelSignerType::Ecdsa(ecdsa) => {
5566 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5568 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5573 let short_channel_id = match self.context.get_short_channel_id() {
5575 None => return None,
5578 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5580 Some(msgs::AnnouncementSignatures {
5581 channel_id: self.context.channel_id(),
5583 node_signature: our_node_sig,
5584 bitcoin_signature: our_bitcoin_sig,
5590 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
5592 fn sign_channel_announcement<NS: Deref>(
5593 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5594 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5595 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5596 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5597 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5598 let were_node_one = announcement.node_id_1 == our_node_key;
5600 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5601 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5602 match &self.context.holder_signer {
5603 ChannelSignerType::Ecdsa(ecdsa) => {
5604 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5605 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5606 Ok(msgs::ChannelAnnouncement {
5607 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5608 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5609 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5610 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5611 contents: announcement,
5616 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5620 /// Processes an incoming announcement_signatures message, providing a fully-signed
5621 /// channel_announcement message which we can broadcast and storing our counterparty's
5622 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5623 pub fn announcement_signatures<NS: Deref>(
5624 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5625 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5626 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5627 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5629 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5631 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5632 return Err(ChannelError::Close(format!(
5633 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5634 &announcement, self.context.get_counterparty_node_id())));
5636 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5637 return Err(ChannelError::Close(format!(
5638 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5639 &announcement, self.context.counterparty_funding_pubkey())));
5642 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5643 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5644 return Err(ChannelError::Ignore(
5645 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5648 self.sign_channel_announcement(node_signer, announcement)
5651 /// Gets a signed channel_announcement for this channel, if we previously received an
5652 /// announcement_signatures from our counterparty.
5653 pub fn get_signed_channel_announcement<NS: Deref>(
5654 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5655 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5656 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5659 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5661 Err(_) => return None,
5663 match self.sign_channel_announcement(node_signer, announcement) {
5664 Ok(res) => Some(res),
5669 /// May panic if called on a channel that wasn't immediately-previously
5670 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5671 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5672 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5673 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5674 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5675 // current to_remote balances. However, it no longer has any use, and thus is now simply
5676 // set to a dummy (but valid, as required by the spec) public key.
5677 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5678 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5679 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5680 let mut pk = [2; 33]; pk[1] = 0xff;
5681 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5682 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5683 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5684 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5687 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5690 self.mark_awaiting_response();
5691 msgs::ChannelReestablish {
5692 channel_id: self.context.channel_id(),
5693 // The protocol has two different commitment number concepts - the "commitment
5694 // transaction number", which starts from 0 and counts up, and the "revocation key
5695 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5696 // commitment transaction numbers by the index which will be used to reveal the
5697 // revocation key for that commitment transaction, which means we have to convert them
5698 // to protocol-level commitment numbers here...
5700 // next_local_commitment_number is the next commitment_signed number we expect to
5701 // receive (indicating if they need to resend one that we missed).
5702 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
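// Purely as arithmetic illustration: once cur_holder_commitment_transaction_number has been
// decremented twice (two holder commitments fully processed), it equals
// INITIAL_COMMITMENT_NUMBER - 2 and next_local_commitment_number works out to 2, i.e. we
// expect protocol-level commitment number 2 next.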
5703 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5704 // receive, however we track it by the next commitment number for a remote transaction
5705 // (which is one further, as they always revoke previous commitment transaction, not
5706 // the one we send) so we have to decrement by 1. Note that if
5707 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5708 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5710 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5711 your_last_per_commitment_secret: remote_last_secret,
5712 my_current_per_commitment_point: dummy_pubkey,
5713 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5714 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5715 // txid of that interactive transaction, else we MUST NOT set it.
5716 next_funding_txid: None,
5721 // Send stuff to our remote peers:
5723 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5724 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5725 /// commitment update.
5727 /// `Err`s will only be [`ChannelError::Ignore`].
5728 pub fn queue_add_htlc<F: Deref, L: Deref>(
5729 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5730 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5731 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5732 ) -> Result<(), ChannelError>
5733 where F::Target: FeeEstimator, L::Target: Logger
5736 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5737 skimmed_fee_msat, fee_estimator, logger)
5738 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5740 if let ChannelError::Ignore(_) = err { /* fine */ }
5741 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5746 /// Adds a pending outbound HTLC to this channel, note that you probably want
5747 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5749 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5751 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5752 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5754 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5755 /// we may not yet have sent the previous commitment update messages and will need to
5756 /// regenerate them.
5758 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5759 /// on this [`Channel`] if `force_holding_cell` is false.
5761 /// `Err`s will only be [`ChannelError::Ignore`].
5762 fn send_htlc<F: Deref, L: Deref>(
5763 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5764 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5765 skimmed_fee_msat: Option<u64>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5766 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5767 where F::Target: FeeEstimator, L::Target: Logger
5769 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5770 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5772 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5773 if amount_msat > channel_total_msat {
5774 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5777 if amount_msat == 0 {
5778 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5781 let available_balances = self.context.get_available_balances(fee_estimator);
5782 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5783 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5784 available_balances.next_outbound_htlc_minimum_msat)));
5787 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5788 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5789 available_balances.next_outbound_htlc_limit_msat)));
5792 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5793 // Note that this should never really happen: being !is_live() on receipt of an
5794 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5795 // the user to send directly into a !is_live() channel. However, if we
5796 // disconnected during the time the previous hop was doing the commitment dance we may
5797 // end up getting here after the forwarding delay. In any case, returning an
5798 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5799 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5802 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5803 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5804 payment_hash, amount_msat,
5805 if force_holding_cell { "into holding cell" }
5806 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5807 else { "to peer" });
5809 if need_holding_cell {
5810 force_holding_cell = true;
5813 // Now update local state:
5814 if force_holding_cell {
5815 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5820 onion_routing_packet,
5826 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5827 htlc_id: self.context.next_holder_htlc_id,
5829 payment_hash: payment_hash.clone(),
5831 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5836 let res = msgs::UpdateAddHTLC {
5837 channel_id: self.context.channel_id,
5838 htlc_id: self.context.next_holder_htlc_id,
5842 onion_routing_packet,
5845 self.context.next_holder_htlc_id += 1;
5850 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5851 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5852 // We can upgrade the status of some HTLCs that are waiting on a commitment. Even if we
5853 // fail to generate this, we are still at least at a position where upgrading their status is acceptable.
5855 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5856 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5857 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5859 if let Some(state) = new_state {
5860 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5864 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5865 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5866 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5867 // Grab the preimage, if it exists, instead of cloning
5868 let mut reason = OutboundHTLCOutcome::Success(None);
5869 mem::swap(outcome, &mut reason);
5870 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5873 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5874 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5875 debug_assert!(!self.context.is_outbound());
5876 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5877 self.context.feerate_per_kw = feerate;
5878 self.context.pending_update_fee = None;
5881 log_debug!(logger, "setting resend_order to RevokeAndACKFirst");
5882 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5884 let (mut htlcs_ref, counterparty_commitment_tx) =
5885 self.build_commitment_no_state_update(logger);
5886 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5887 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5888 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5890 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5891 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5894 self.context.latest_monitor_update_id += 1;
5895 let monitor_update = ChannelMonitorUpdate {
5896 update_id: self.context.latest_monitor_update_id,
5897 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5898 commitment_txid: counterparty_commitment_txid,
5899 htlc_outputs: htlcs.clone(),
5900 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5901 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5902 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5903 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5904 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5907 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5911 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5912 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5913 where L::Target: Logger
5915 let counterparty_keys = self.context.build_remote_transaction_keys();
5916 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5917 let counterparty_commitment_tx = commitment_stats.tx;
5919 #[cfg(any(test, fuzzing))]
5921 if !self.context.is_outbound() {
5922 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5923 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5924 if let Some(info) = projected_commit_tx_info {
5925 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5926 if info.total_pending_htlcs == total_pending_htlcs
5927 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5928 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5929 && info.feerate == self.context.feerate_per_kw {
5930 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5931 assert_eq!(actual_fee, info.fee);
5937 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5940 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5941 /// generation when we shouldn't change HTLC/channel state.
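///
/// A hedged sketch (internal caller; `logger` is assumed to be in scope):
///
/// ```ignore
/// let (commitment_signed, _counterparty_commitment_info) =
///     self.send_commitment_no_state_update(&logger)?;
/// // Re-send `commitment_signed` while handling channel_reestablish.
/// ```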
5942 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5943 // Get the fee tests from `build_commitment_no_state_update`
5944 #[cfg(any(test, fuzzing))]
5945 self.build_commitment_no_state_update(logger);
5947 let counterparty_keys = self.context.build_remote_transaction_keys();
5948 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5949 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5951 match &self.context.holder_signer {
5952 ChannelSignerType::Ecdsa(ecdsa) => {
5953 let (signature, htlc_signatures);
5956 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5957 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5961 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5962 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5964 htlc_signatures = res.1;
5966 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5967 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5968 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5969 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5971 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5972 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5973 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5974 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5975 log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
5976 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5980 Ok((msgs::CommitmentSigned {
5981 channel_id: self.context.channel_id,
5985 partial_signature_with_nonce: None,
5986 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5991 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5992 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5994 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5995 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
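///
/// A hedged usage sketch (hypothetical caller; `channel`, the HTLC parameters,
/// `fee_estimator` and `logger` are assumed to be in scope):
///
/// ```ignore
/// let monitor_update_opt = channel.send_htlc_and_commit(
///     amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet,
///     None, // skimmed_fee_msat
///     &fee_estimator, &logger,
/// )?;
/// // Any returned `ChannelMonitorUpdate` must be persisted before relying on the new state.
/// ```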
5996 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5997 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5998 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5999 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6000 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6001 where F::Target: FeeEstimator, L::Target: Logger
6003 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6004 onion_routing_packet, false, skimmed_fee_msat, fee_estimator, logger);
6005 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6008 let monitor_update = self.build_commitment_no_status_check(logger);
6009 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6010 Ok(self.push_ret_blockable_mon_update(monitor_update))
6016 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually occurred.
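///
/// A hedged sketch (hypothetical caller; `channel` and the received `channel_update_msg`
/// are assumed to be in scope):
///
/// ```ignore
/// let forwarding_info_changed = channel.channel_update(&channel_update_msg)?;
/// ```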
6018 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6019 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6020 fee_base_msat: msg.contents.fee_base_msat,
6021 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6022 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6024 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6026 self.context.counterparty_forwarding_info = new_forwarding_info;
6032 /// Begins the shutdown process, getting a message for the remote peer and returning all
6033 /// holding cell HTLCs for payment failure.
6035 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
6036 /// [`ChannelMonitorUpdate`] will be returned.
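///
/// A hedged usage sketch (hypothetical caller; `channel`, `signer_provider` and
/// `their_features` are assumed to be in scope):
///
/// ```ignore
/// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
///     channel.get_shutdown(&signer_provider, &their_features, None, None)?;
/// // Send `shutdown_msg` to the peer and fail `dropped_htlcs` backwards.
/// ```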
6037 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6038 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6039 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6041 for htlc in self.context.pending_outbound_htlcs.iter() {
6042 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6043 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6046 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
6047 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
6048 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6050 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
6051 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6054 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6055 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6057 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
6058 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
6059 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6062 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
6063 // script is set; we just force-close and call it a day.
6064 let mut chan_closed = false;
6065 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
6069 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6071 None if !chan_closed => {
6072 // use override shutdown script if provided
6073 let shutdown_scriptpubkey = match override_shutdown_script {
6074 Some(script) => script,
6076 // otherwise, use the shutdown scriptpubkey provided by the signer
6077 match signer_provider.get_shutdown_scriptpubkey() {
6078 Ok(scriptpubkey) => scriptpubkey,
6079 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6083 if !shutdown_scriptpubkey.is_compatible(their_features) {
6084 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6086 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6092 // From here on out, we may not fail!
6093 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6094 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
6095 self.context.channel_state = ChannelState::ShutdownComplete as u32;
6097 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
6099 self.context.update_time_counter += 1;
6101 let monitor_update = if update_shutdown_script {
6102 self.context.latest_monitor_update_id += 1;
6103 let monitor_update = ChannelMonitorUpdate {
6104 update_id: self.context.latest_monitor_update_id,
6105 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6106 scriptpubkey: self.get_closing_scriptpubkey(),
6109 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6110 self.push_ret_blockable_mon_update(monitor_update)
6112 let shutdown = msgs::Shutdown {
6113 channel_id: self.context.channel_id,
6114 scriptpubkey: self.get_closing_scriptpubkey(),
6117 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6118 // our shutdown until we've committed all of the pending changes.
6119 self.context.holding_cell_update_fee = None;
6120 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6121 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6123 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6124 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6131 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6132 "we can't both complete shutdown and return a monitor update");
6134 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6137 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6138 self.context.holding_cell_htlc_updates.iter()
6139 .flat_map(|htlc_update| {
6141 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6142 => Some((source, payment_hash)),
6146 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6150 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6151 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6152 pub context: ChannelContext<SP>,
6153 pub unfunded_context: UnfundedChannelContext,
6154 pub signer_pending_open_channel: bool,
6157 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6158 pub fn new<ES: Deref, F: Deref>(
6159 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6160 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6161 outbound_scid_alias: u64
6162 ) -> Result<OutboundV1Channel<SP>, APIError>
6163 where ES::Target: EntropySource,
6164 F::Target: FeeEstimator
6166 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6167 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6168 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6169 let pubkeys = holder_signer.pubkeys().clone();
6171 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6172 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6174 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6175 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6177 let channel_value_msat = channel_value_satoshis * 1000;
6178 if push_msat > channel_value_msat {
6179 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6181 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6182 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6184 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6185 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6186 // Protocol level safety check in place, although it should never happen because
6187 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6188 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6191 let channel_type = Self::get_initial_channel_type(&config, their_features);
6192 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6194 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6195 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6197 (ConfirmationTarget::NonAnchorChannelFee, 0)
6199 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
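// Illustrative arithmetic for the affordability check below (assumed BOLT-3 weights,
// not asserted by this code): a non-anchor commitment transaction weighs roughly
// 724 weight units plus 172 per HTLC. At, say, 2500 sat/kW with 4 HTLCs budgeted,
// that is (724 + 4 * 172) = 1412 WU, i.e. about 2500 * 1412 / 1000 = 3530 sats
// (3_530_000 msat), which the funder's balance must cover after any anchor value.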
6201 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6202 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6203 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6204 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6207 let mut secp_ctx = Secp256k1::new();
6208 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6210 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6211 match signer_provider.get_shutdown_scriptpubkey() {
6212 Ok(scriptpubkey) => Some(scriptpubkey),
6213 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6217 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6218 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6219 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6223 let destination_script = match signer_provider.get_destination_script() {
6224 Ok(script) => script,
6225 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6228 let temporary_channel_id = ChannelId::temporary_from_entropy_source(entropy_source);
6230 let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).ok();
6233 context: ChannelContext {
6236 config: LegacyChannelConfig {
6237 options: config.channel_config.clone(),
6238 announced_channel: config.channel_handshake_config.announced_channel,
6239 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6244 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6246 channel_id: temporary_channel_id,
6247 temporary_channel_id: Some(temporary_channel_id),
6248 channel_state: ChannelState::OurInitSent as u32,
6249 announcement_sigs_state: AnnouncementSigsState::NotSent,
6251 channel_value_satoshis,
6253 latest_monitor_update_id: 0,
6255 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6256 shutdown_scriptpubkey,
6259 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6260 cur_holder_commitment_point,
6261 prev_holder_commitment_secret: None,
6262 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6265 pending_inbound_htlcs: Vec::new(),
6266 pending_outbound_htlcs: Vec::new(),
6267 holding_cell_htlc_updates: Vec::new(),
6268 pending_update_fee: None,
6269 holding_cell_update_fee: None,
6270 next_holder_htlc_id: 0,
6271 next_counterparty_htlc_id: 0,
6272 update_time_counter: 1,
6274 resend_order: RAACommitmentOrder::CommitmentFirst,
6276 monitor_pending_channel_ready: false,
6277 monitor_pending_revoke_and_ack: false,
6278 monitor_pending_commitment_signed: false,
6279 monitor_pending_forwards: Vec::new(),
6280 monitor_pending_failures: Vec::new(),
6281 monitor_pending_finalized_fulfills: Vec::new(),
6283 signer_pending_commitment_update: false,
6284 signer_pending_revoke_and_ack: false,
6285 signer_pending_funding: false,
6286 signer_pending_channel_ready: false,
6287 signer_pending_commitment_point: false,
6288 signer_pending_released_secret: false,
6290 #[cfg(debug_assertions)]
6291 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6292 #[cfg(debug_assertions)]
6293 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6295 last_sent_closing_fee: None,
6296 pending_counterparty_closing_signed: None,
6297 closing_fee_limits: None,
6298 target_closing_feerate_sats_per_kw: None,
6300 funding_tx_confirmed_in: None,
6301 funding_tx_confirmation_height: 0,
6302 short_channel_id: None,
6303 channel_creation_height: current_chain_height,
6305 feerate_per_kw: commitment_feerate,
6306 counterparty_dust_limit_satoshis: 0,
6307 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6308 counterparty_max_htlc_value_in_flight_msat: 0,
6309 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6310 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6311 holder_selected_channel_reserve_satoshis,
6312 counterparty_htlc_minimum_msat: 0,
6313 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6314 counterparty_max_accepted_htlcs: 0,
6315 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6316 minimum_depth: None, // Filled in in accept_channel
6318 counterparty_forwarding_info: None,
6320 channel_transaction_parameters: ChannelTransactionParameters {
6321 holder_pubkeys: pubkeys,
6322 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6323 is_outbound_from_holder: true,
6324 counterparty_parameters: None,
6325 funding_outpoint: None,
6326 channel_type_features: channel_type.clone()
6328 funding_transaction: None,
6329 is_batch_funding: None,
6331 counterparty_cur_commitment_point: None,
6332 counterparty_prev_commitment_point: None,
6333 counterparty_node_id,
6335 counterparty_shutdown_scriptpubkey: None,
6337 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6339 channel_update_status: ChannelUpdateStatus::Enabled,
6340 closing_signed_in_flight: false,
6342 announcement_sigs: None,
6344 #[cfg(any(test, fuzzing))]
6345 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6346 #[cfg(any(test, fuzzing))]
6347 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6349 workaround_lnd_bug_4006: None,
6350 sent_message_awaiting_response: None,
6352 latest_inbound_scid_alias: None,
6353 outbound_scid_alias,
6355 channel_pending_event_emitted: false,
6356 channel_ready_event_emitted: false,
6358 #[cfg(any(test, fuzzing))]
6359 historical_inbound_htlc_fulfills: HashSet::new(),
6364 blocked_monitor_updates: Vec::new(),
6366 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
6367 signer_pending_open_channel: false,
6371 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6372 /// a funding_created message for the remote peer.
6373 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6374 /// or if called on an inbound channel.
6375 /// Note that channel_id changes during this call!
6376 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6377 /// If an Err is returned, it is a ChannelError::Close.
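///
/// A hedged usage sketch (hypothetical caller; `outbound_channel`, `funding_tx` and
/// `logger` are assumed to be in scope, with the funding output at index 0):
///
/// ```ignore
/// let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
/// let (channel, funding_created_opt) = outbound_channel
///     .get_funding_created(funding_tx, funding_txo, false, &logger)
///     .map_err(|(_channel, err)| err)?;
/// // Do NOT broadcast `funding_tx` until funding_signed has been received.
/// ```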
6378 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6379 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6380 if !self.context.is_outbound() {
6381 panic!("Tried to create outbound funding_created message on an inbound channel!");
6383 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6384 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6386 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6387 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6388 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6389 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6392 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6393 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6395 // Now that we're past error-generating stuff, update our local state:
6397 self.context.channel_state = ChannelState::FundingCreated as u32;
6398 self.context.channel_id = funding_txo.to_channel_id();
6400 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6401 // We can skip this if it is a zero-conf channel.
6402 if funding_transaction.is_coin_base() &&
6403 self.context.minimum_depth.unwrap_or(0) > 0 &&
6404 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6405 self.context.minimum_depth = Some(COINBASE_MATURITY);
6408 self.context.funding_transaction = Some(funding_transaction);
6409 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6411 let funding_created = self.context.get_funding_created_msg(logger);
6412 if funding_created.is_none() {
6413 if !self.context.signer_pending_funding {
6414 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6415 self.context.signer_pending_funding = true;
6419 let channel = Channel {
6420 context: self.context,
6423 Ok((channel, funding_created))
6426 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6427 // The default channel type (ie the first one we try) depends on whether the channel is
6428 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6429 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6430 // with no other changes, and fall back to `only_static_remotekey`.
6431 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6432 if !config.channel_handshake_config.announced_channel &&
6433 config.channel_handshake_config.negotiate_scid_privacy &&
6434 their_features.supports_scid_privacy() {
6435 ret.set_scid_privacy_required();
6438 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6439 // set it now. If they don't understand it, we'll fall back to our default of
6440 // `only_static_remotekey`.
6441 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6442 their_features.supports_anchors_zero_fee_htlc_tx() {
6443 ret.set_anchors_zero_fee_htlc_tx_required();
6449 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6450 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6451 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
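///
/// A hedged sketch of the retry flow (hypothetical caller; `channel`, `chain_hash`
/// and `fee_estimator` are assumed to be in scope):
///
/// ```ignore
/// match channel.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(Some(open_channel_msg)) => { /* re-send with the downgraded channel type */ },
///     Ok(None) => { /* waiting on the signer; the message will be generated later */ },
///     Err(()) => { /* nothing left to downgrade, fail the channel */ },
/// }
/// ```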
6452 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6453 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6454 ) -> Result<Option<msgs::OpenChannel>, ()>
6456 F::Target: FeeEstimator
6458 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6459 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6460 // We've exhausted our options
6463 // We support opening a few different types of channels. Try removing our additional
6464 // features one by one until we've either arrived at our default or the counterparty has accepted the channel.
6467 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6468 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6469 // checks whether the counterparty supports every feature, this would only happen if the
6470 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
6472 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6473 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6474 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6475 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6476 } else if self.context.channel_type.supports_scid_privacy() {
6477 self.context.channel_type.clear_scid_privacy();
6479 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6481 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6482 let opt_msg = self.get_open_channel(chain_hash);
6483 if opt_msg.is_none() {
6484 self.signer_pending_open_channel = true;
6489 pub fn get_open_channel(&self, chain_hash: ChainHash) -> Option<msgs::OpenChannel> {
6490 if !self.context.is_outbound() {
6491 panic!("Tried to open a channel for an inbound channel?");
6493 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6494 panic!("Cannot generate an open_channel after we've moved forward");
6497 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6498 panic!("Tried to send an open_channel for a channel that has already advanced");
6501 let keys = self.context.get_holder_pubkeys();
6503 self.context.cur_holder_commitment_point.map(|first_per_commitment_point| {
6506 temporary_channel_id: self.context.channel_id,
6507 funding_satoshis: self.context.channel_value_satoshis,
6508 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6509 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6510 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6511 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6512 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6513 feerate_per_kw: self.context.feerate_per_kw as u32,
6514 to_self_delay: self.context.get_holder_selected_contest_delay(),
6515 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6516 funding_pubkey: keys.funding_pubkey,
6517 revocation_basepoint: keys.revocation_basepoint,
6518 payment_point: keys.payment_point,
6519 delayed_payment_basepoint: keys.delayed_payment_basepoint,
6520 htlc_basepoint: keys.htlc_basepoint,
6521 first_per_commitment_point,
6522 channel_flags: if self.context.config.announced_channel {1} else {0},
6523 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6524 Some(script) => script.clone().into_inner(),
6525 None => Builder::new().into_script(),
6527 channel_type: Some(self.context.channel_type.clone()),
6533 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6534 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6536 // Check sanity of message fields:
6537 if !self.context.is_outbound() {
6538 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6540 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6541 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6543 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6544 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6546 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6547 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6549 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6550 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6552 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6553 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6554 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6556 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6557 if msg.htlc_minimum_msat >= full_channel_value_msat {
6558 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6560 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6561 if msg.to_self_delay > max_delay_acceptable {
6562 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6564 if msg.max_accepted_htlcs < 1 {
6565 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6567 if msg.max_accepted_htlcs > MAX_HTLCS {
6568 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6571 // Now check against optional parameters as set by config...
6572 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6573 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6575 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6576 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6578 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6579 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6581 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6582 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6584 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6585 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6587 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6588 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6590 if msg.minimum_depth > peer_limits.max_minimum_depth {
6591 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6594 if let Some(ty) = &msg.channel_type {
6595 if *ty != self.context.channel_type {
6596 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6598 } else if their_features.supports_channel_type() {
6599 // Assume they've accepted the channel type as they said they understand it.
6601 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6602 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6603 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6605 self.context.channel_type = channel_type.clone();
6606 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6609 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6610 match &msg.shutdown_scriptpubkey {
6611 &Some(ref script) => {
6612 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6613 if script.len() == 0 {
6616 if !script::is_bolt2_compliant(&script, their_features) {
6617 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6619 Some(script.clone())
6622 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy; we fail the channel
6624 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6629 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6630 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6631 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6632 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6633 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6635 if peer_limits.trust_own_funding_0conf {
6636 self.context.minimum_depth = Some(msg.minimum_depth);
6638 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6641 let counterparty_pubkeys = ChannelPublicKeys {
6642 funding_pubkey: msg.funding_pubkey,
6643 revocation_basepoint: msg.revocation_basepoint,
6644 payment_point: msg.payment_point,
6645 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6646 htlc_basepoint: msg.htlc_basepoint
6649 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6650 selected_contest_delay: msg.to_self_delay,
6651 pubkeys: counterparty_pubkeys,
6654 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6655 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6657 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6658 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6663 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
6666 pub fn signer_maybe_unblocked<L: Deref>(&mut self, chain_hash: &ChainHash, logger: &L) -> UnfundedOutboundV1SignerResumeUpdates
6667 where L::Target: Logger
6669 let open_channel = if self.signer_pending_open_channel {
6670 self.context.update_holder_per_commitment(logger);
6671 self.get_open_channel(chain_hash.clone()).map(|msg| {
6672 log_trace!(logger, "Clearing signer_pending_open_channel");
6673 self.signer_pending_open_channel = false;
6677 UnfundedOutboundV1SignerResumeUpdates {
6683 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6684 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6685 pub context: ChannelContext<SP>,
6686 pub unfunded_context: UnfundedChannelContext,
6687 pub signer_pending_accept_channel: bool,
6690 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6691 /// Creates a new channel from a remote side's request for one.
6692 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6693 pub fn new<ES: Deref, F: Deref, L: Deref>(
6694 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6695 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6696 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6697 current_chain_height: u32, logger: &L, is_0conf: bool,
6698 ) -> Result<InboundV1Channel<SP>, ChannelError>
6699 where ES::Target: EntropySource,
6700 F::Target: FeeEstimator,
6703 let announced_channel = (msg.channel_flags & 1) == 1;
6705 // First check the channel type is known, failing before we do anything else if we don't
6706 // support this channel type.
6707 let channel_type = if let Some(channel_type) = &msg.channel_type {
6708 if channel_type.supports_any_optional_bits() {
6709 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6712 // We only support the channel types defined by the `ChannelManager` in
6713 // `provided_channel_type_features`. The channel type must always support
6714 // `static_remote_key`.
6715 if !channel_type.requires_static_remote_key() {
6716 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6718 // Make sure we support all of the features behind the channel type.
6719 if !channel_type.is_subset(our_supported_features) {
6720 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6722 if channel_type.requires_scid_privacy() && announced_channel {
6723 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6725 channel_type.clone()
6727 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6728 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6729 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6734 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6735 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6736 let pubkeys = holder_signer.pubkeys().clone();
6737 let counterparty_pubkeys = ChannelPublicKeys {
6738 funding_pubkey: msg.funding_pubkey,
6739 revocation_basepoint: msg.revocation_basepoint,
6740 payment_point: msg.payment_point,
6741 delayed_payment_basepoint: msg.delayed_payment_basepoint,
6742 htlc_basepoint: msg.htlc_basepoint
6745 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6746 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6749 // Check sanity of message fields:
6750 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6751 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6753 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6754 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6756 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6757 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6759 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6760 if msg.push_msat > full_channel_value_msat {
6761 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6763 if msg.dust_limit_satoshis > msg.funding_satoshis {
6764 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6766 if msg.htlc_minimum_msat >= full_channel_value_msat {
6767 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6769 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, logger)?;
6771 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6772 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6773 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6775 if msg.max_accepted_htlcs < 1 {
6776 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6778 if msg.max_accepted_htlcs > MAX_HTLCS {
6779 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6782 // Now check against optional parameters as set by config...
6783 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6784 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6786 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6787 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6789 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6790 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6792 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6793 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6795 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6796 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6798 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6799 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6801 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6802 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6805 // Convert things into internal flags and prep our state:
6807 if config.channel_handshake_limits.force_announced_channel_preference {
6808 if config.channel_handshake_config.announced_channel != announced_channel {
6809 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6813 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6814 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6815 // Protocol level safety check in place, although it should never happen because
6816 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6817 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6819 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6820 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6822 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6823 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6824 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6826 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6827 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6830 // check if the funder's amount for the initial commitment tx is sufficient
6831 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6832 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6833 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6837 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6838 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6839 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6840 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6843 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6844 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6845 // want to push much to us), our counterparty should always have more than our reserve.
6846 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6847 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6850 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6851 match &msg.shutdown_scriptpubkey {
6852 &Some(ref script) => {
6853 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6854 if script.len() == 0 {
6857 if !script::is_bolt2_compliant(&script, their_features) {
6858 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6860 Some(script.clone())
6863 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy; we fail the channel
6865 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6870 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6871 match signer_provider.get_shutdown_scriptpubkey() {
6872 Ok(scriptpubkey) => Some(scriptpubkey),
6873 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6877 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6878 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6879 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6883 let destination_script = match signer_provider.get_destination_script() {
6884 Ok(script) => script,
6885 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6888 let mut secp_ctx = Secp256k1::new();
6889 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6891 let minimum_depth = if is_0conf {
6894 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6896 let cur_holder_commitment_point = holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, &secp_ctx).ok();
6899 context: ChannelContext {
6902 config: LegacyChannelConfig {
6903 options: config.channel_config.clone(),
6905 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6910 inbound_handshake_limits_override: None,
6912 temporary_channel_id: Some(msg.temporary_channel_id),
6913 channel_id: msg.temporary_channel_id,
6914 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6915 announcement_sigs_state: AnnouncementSigsState::NotSent,
6918 latest_monitor_update_id: 0,
6920 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6921 shutdown_scriptpubkey,
6924 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6925 cur_holder_commitment_point,
6926 prev_holder_commitment_secret: None,
6927 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6928 value_to_self_msat: msg.push_msat,
6930 pending_inbound_htlcs: Vec::new(),
6931 pending_outbound_htlcs: Vec::new(),
6932 holding_cell_htlc_updates: Vec::new(),
6933 pending_update_fee: None,
6934 holding_cell_update_fee: None,
6935 next_holder_htlc_id: 0,
6936 next_counterparty_htlc_id: 0,
6937 update_time_counter: 1,
6939 resend_order: RAACommitmentOrder::CommitmentFirst,
6941 monitor_pending_channel_ready: false,
6942 monitor_pending_revoke_and_ack: false,
6943 monitor_pending_commitment_signed: false,
6944 monitor_pending_forwards: Vec::new(),
6945 monitor_pending_failures: Vec::new(),
6946 monitor_pending_finalized_fulfills: Vec::new(),
6948 signer_pending_commitment_update: false,
6949 signer_pending_revoke_and_ack: false,
6950 signer_pending_funding: false,
6951 signer_pending_channel_ready: false,
6952 signer_pending_commitment_point: false,
6953 signer_pending_released_secret: false,
6955 #[cfg(debug_assertions)]
6956 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6957 #[cfg(debug_assertions)]
6958 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6960 last_sent_closing_fee: None,
6961 pending_counterparty_closing_signed: None,
6962 closing_fee_limits: None,
6963 target_closing_feerate_sats_per_kw: None,
6965 funding_tx_confirmed_in: None,
6966 funding_tx_confirmation_height: 0,
6967 short_channel_id: None,
6968 channel_creation_height: current_chain_height,
6970 feerate_per_kw: msg.feerate_per_kw,
6971 channel_value_satoshis: msg.funding_satoshis,
6972 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6973 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6974 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6975 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6976 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6977 holder_selected_channel_reserve_satoshis,
6978 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6979 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6980 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6981 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6984 counterparty_forwarding_info: None,
6986 channel_transaction_parameters: ChannelTransactionParameters {
6987 holder_pubkeys: pubkeys,
6988 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6989 is_outbound_from_holder: false,
6990 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6991 selected_contest_delay: msg.to_self_delay,
6992 pubkeys: counterparty_pubkeys,
6994 funding_outpoint: None,
6995 channel_type_features: channel_type.clone()
6997 funding_transaction: None,
6998 is_batch_funding: None,
7000 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7001 counterparty_prev_commitment_point: None,
7002 counterparty_node_id,
7004 counterparty_shutdown_scriptpubkey,
7006 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7008 channel_update_status: ChannelUpdateStatus::Enabled,
7009 closing_signed_in_flight: false,
7011 announcement_sigs: None,
7013 #[cfg(any(test, fuzzing))]
7014 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7015 #[cfg(any(test, fuzzing))]
7016 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7018 workaround_lnd_bug_4006: None,
7019 sent_message_awaiting_response: None,
7021 latest_inbound_scid_alias: None,
7022 outbound_scid_alias: 0,
7024 channel_pending_event_emitted: false,
7025 channel_ready_event_emitted: false,
7027 #[cfg(any(test, fuzzing))]
7028 historical_inbound_htlc_fulfills: HashSet::new(),
7033 blocked_monitor_updates: Vec::new(),
7035 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
7036 signer_pending_accept_channel: false,
7042 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7043 /// should be sent back to the counterparty node.
7045 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7046 pub fn accept_inbound_channel(&mut self) -> Option<msgs::AcceptChannel> {
7047 if self.context.is_outbound() {
7048 panic!("Tried to send accept_channel for an outbound channel?");
7050 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
7051 panic!("Tried to send accept_channel after channel had moved forward");
7053 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7054 panic!("Tried to send an accept_channel for a channel that has already advanced");
7057 self.generate_accept_channel_message()
7060 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7061 /// inbound channel. If the intention is to accept an inbound channel, use
7062 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7064 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7065 fn generate_accept_channel_message(&self) -> Option<msgs::AcceptChannel> {
7066 self.context.cur_holder_commitment_point.map(|first_per_commitment_point| {
7067 let keys = self.context.get_holder_pubkeys();
7068 msgs::AcceptChannel {
7069 temporary_channel_id: self.context.channel_id,
7070 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7071 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7072 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7073 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7074 minimum_depth: self.context.minimum_depth.unwrap(),
7075 to_self_delay: self.context.get_holder_selected_contest_delay(),
7076 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7077 funding_pubkey: keys.funding_pubkey,
7078 revocation_basepoint: keys.revocation_basepoint,
7079 payment_point: keys.payment_point,
7080 delayed_payment_basepoint: keys.delayed_payment_basepoint,
7081 htlc_basepoint: keys.htlc_basepoint,
7082 first_per_commitment_point,
7083 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7084 Some(script) => script.clone().into_inner(),
7085 None => Builder::new().into_script(),
7087 channel_type: Some(self.context.channel_type.clone()),
7089 next_local_nonce: None,
7094 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7095 /// inbound channel without accepting it.
7097 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7099 pub fn get_accept_channel_message(&self) -> Option<msgs::AcceptChannel> {
7100 self.generate_accept_channel_message()
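/// Verifies the counterparty's signature (received in their `funding_created` message) over our
/// initial holder commitment transaction, returning that transaction if the signature checks out.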
7103 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7104 let funding_script = self.context.get_funding_redeemscript();
7106 let keys = self.context.build_next_holder_transaction_keys();
7107 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7108 let trusted_tx = initial_commitment_tx.trust();
7109 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7110 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7111 // They sign the holder commitment transaction...
7112 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7113 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7114 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7115 encode::serialize_hex(&funding_script), &self.context.channel_id());
7116 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7118 Ok(initial_commitment_tx)
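/// Handles the counterparty's `funding_created` message: records the funding outpoint, verifies
/// their signature on our initial commitment transaction, builds the [`ChannelMonitor`] that must
/// be persisted, and promotes this into a full [`Channel`]. The returned `funding_signed` is
/// `None` if we are still waiting on the signer for our counterparty-commitment signature.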
7121 pub fn funding_created<L: Deref>(
7122 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7123 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::Signer>), (Self, ChannelError)>
7127 if self.context.is_outbound() {
7128 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7130 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
7131 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
// remember the channel, so it's safe to just send an error_message here and drop the channel.
7134 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
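// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER (2^48 - 1), and
// `get_min_seen_secret()` starts out at 1 << 48, i.e. "no counterparty secrets seen yet".
// If any of these have moved, we somehow advanced state before the funding flow completed.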
7136 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7137 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7138 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7139 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7142 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7143 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7144 // This is an externally observable change before we finish all our checks. In particular
7145 // check_funding_created_signature may fail.
7146 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7148 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7150 Err(ChannelError::Close(e)) => {
7151 self.context.channel_transaction_parameters.funding_outpoint = None;
7152 return Err((self, ChannelError::Close(e)));
7155 // The only error we know how to handle is ChannelError::Close, so we fall over here
7156 // to make sure we don't continue with an inconsistent state.
7157 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7161 let holder_commitment_tx = HolderCommitmentTransaction::new(
7162 initial_commitment_tx,
7165 &self.context.get_holder_pubkeys().funding_pubkey,
7166 self.context.counterparty_funding_pubkey()
7169 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7170 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7173 // Now that we're past error-generating stuff, update our local state:
7175 self.context.channel_state = ChannelState::FundingSent as u32;
7176 self.context.channel_id = funding_txo.to_channel_id();
7177 self.context.cur_counterparty_commitment_transaction_number -= 1;
7178 self.context.cur_holder_commitment_transaction_number -= 1;
7179 self.context.update_holder_per_commitment(logger);
7181 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7183 let funding_redeemscript = self.context.get_funding_redeemscript();
7184 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
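// BOLT #3 obscures the 48-bit commitment number by XORing it with the lower 48 bits of the
// SHA256 of the opener's and acceptor's payment points (opener first); the monitor needs the
// same factor to read commitment numbers back out of broadcast commitment transactions.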
7185 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7186 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7187 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7188 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7189 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7190 shutdown_script, self.context.get_holder_selected_contest_delay(),
7191 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7192 &self.context.channel_transaction_parameters,
7193 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7195 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7197 channel_monitor.provide_initial_counterparty_commitment_tx(
7198 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7199 self.context.cur_counterparty_commitment_transaction_number + 1,
7200 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7201 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7202 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7204 log_info!(logger, "{} funding_signed for peer for channel {}",
7205 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7207 // Promote the channel to a full-fledged one now that we have updated the state and have a
7208 // `ChannelMonitor`.
7209 let mut channel = Channel {
7210 context: self.context,
7213 let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some();
7214 log_trace!(logger, "funding_created {} channel_ready", if need_channel_ready { "needs" } else { "does not need" });
7215 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7217 Ok((channel, funding_signed, channel_monitor))
/// Indicates that the signer may have some signatures for us, so we should retry if we're blocked waiting on it.
7223 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> UnfundedInboundV1SignerResumeUpdates
7224 where L::Target: Logger
7226 let accept_channel = if self.signer_pending_accept_channel {
7227 self.context.update_holder_per_commitment(logger);
7228 self.generate_accept_channel_message().map(|msg| {
7229 log_trace!(logger, "Clearing signer_pending_accept_channel");
7230 self.signer_pending_accept_channel = false;
7234 UnfundedInboundV1SignerResumeUpdates {
7240 const SERIALIZATION_VERSION: u8 = 3;
7241 const MIN_SERIALIZATION_VERSION: u8 = 2;
7243 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7249 impl Writeable for ChannelUpdateStatus {
7250 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// We only care about writing out the current state as it was announced, i.e. only either
// Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
// channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
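// On the read side below, 0 maps back to Enabled and 1 to Disabled, so a staged state simply
// collapses to whatever we last announced once the channel is reloaded.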
7255 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7256 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7257 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7258 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7264 impl Readable for ChannelUpdateStatus {
7265 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7266 Ok(match <u8 as Readable>::read(reader)? {
7267 0 => ChannelUpdateStatus::Enabled,
7268 1 => ChannelUpdateStatus::Disabled,
7269 _ => return Err(DecodeError::InvalidValue),
7274 impl Writeable for AnnouncementSigsState {
7275 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7276 // We only care about writing out the current state as if we had just disconnected, at
7277 // which point we always set anything but AnnouncementSigsReceived to NotSent.
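// Concretely, NotSent/MessageSent/Committed all round-trip to NotSent (prompting us to re-send
// announcement_signatures after reconnecting), while only PeerReceived survives a reload.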
7279 AnnouncementSigsState::NotSent => 0u8.write(writer),
7280 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7281 AnnouncementSigsState::Committed => 0u8.write(writer),
7282 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7287 impl Readable for AnnouncementSigsState {
7288 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7289 Ok(match <u8 as Readable>::read(reader)? {
7290 0 => AnnouncementSigsState::NotSent,
7291 1 => AnnouncementSigsState::PeerReceived,
7292 _ => return Err(DecodeError::InvalidValue),
7297 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7298 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7302 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7304 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7305 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7306 // the low bytes now and the optional high bytes later.
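// A sketch of the round trip (values here are purely illustrative):
//   let user_id: u128 = 0x2222_2222_2222_2222_1111_1111_1111_1111;
//   let low = user_id as u64;            // 0x1111_1111_1111_1111, written just below
//   let high = (user_id >> 64) as u64;   // 0x2222_2222_2222_2222, written later as an odd TLV
//   assert_eq!((low as u128) | ((high as u128) << 64), user_id);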
7307 let user_id_low = self.context.user_id as u64;
7308 user_id_low.write(writer)?;
7310 // Version 1 deserializers expected to read parts of the config object here. Version 2
7311 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7312 // `minimum_depth` we simply write dummy values here.
7313 writer.write_all(&[0; 8])?;
7315 self.context.channel_id.write(writer)?;
7316 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7317 self.context.channel_value_satoshis.write(writer)?;
7319 self.context.latest_monitor_update_id.write(writer)?;
7321 let mut key_data = VecWriter(Vec::new());
7322 // TODO (taproot|arik): Introduce serialization distinction for non-ECDSA signers.
7323 self.context.holder_signer.as_ecdsa().expect("Only ECDSA signers may be serialized").write(&mut key_data)?;
7324 assert!(key_data.0.len() < core::usize::MAX);
7325 assert!(key_data.0.len() < core::u32::MAX as usize);
7326 (key_data.0.len() as u32).write(writer)?;
7327 writer.write_all(&key_data.0[..])?;
7329 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7330 // deserialized from that format.
7331 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7332 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7333 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7335 self.context.destination_script.write(writer)?;
7337 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7338 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7339 self.context.value_to_self_msat.write(writer)?;
7341 let mut dropped_inbound_htlcs = 0;
7342 for htlc in self.context.pending_inbound_htlcs.iter() {
7343 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7344 dropped_inbound_htlcs += 1;
7347 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7348 for htlc in self.context.pending_inbound_htlcs.iter() {
7349 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7352 htlc.htlc_id.write(writer)?;
7353 htlc.amount_msat.write(writer)?;
7354 htlc.cltv_expiry.write(writer)?;
7355 htlc.payment_hash.write(writer)?;
7357 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7358 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7360 htlc_state.write(writer)?;
7362 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7364 htlc_state.write(writer)?;
7366 &InboundHTLCState::Committed => {
7369 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7371 removal_reason.write(writer)?;
7376 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7377 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7379 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7380 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7381 htlc.htlc_id.write(writer)?;
7382 htlc.amount_msat.write(writer)?;
7383 htlc.cltv_expiry.write(writer)?;
7384 htlc.payment_hash.write(writer)?;
7385 htlc.source.write(writer)?;
7387 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7389 onion_packet.write(writer)?;
7391 &OutboundHTLCState::Committed => {
7394 &OutboundHTLCState::RemoteRemoved(_) => {
// Treat this as a Committed because we haven't received the CS - they'll
// re-send the claim/fail on reconnect, as well as (hopefully) the missing CS.
7399 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7401 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7402 preimages.push(preimage);
7404 let reason: Option<&HTLCFailReason> = outcome.into();
7405 reason.write(writer)?;
7407 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7409 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7410 preimages.push(preimage);
7412 let reason: Option<&HTLCFailReason> = outcome.into();
7413 reason.write(writer)?;
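// Skimmed fees are collected into a sparse Vec<Option<u64>> that stays index-aligned with
// `pending_outbound_htlcs`: we only start filling it once the first `Some` is seen, backfilling
// `None` for earlier HTLCs, so channels with no skimmed fees keep the vector empty.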
7416 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7417 if pending_outbound_skimmed_fees.is_empty() {
7418 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7420 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7421 } else if !pending_outbound_skimmed_fees.is_empty() {
7422 pending_outbound_skimmed_fees.push(None);
7426 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7427 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7428 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7430 &HTLCUpdateAwaitingACK::AddHTLC {
7431 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7435 amount_msat.write(writer)?;
7436 cltv_expiry.write(writer)?;
7437 payment_hash.write(writer)?;
7438 source.write(writer)?;
7439 onion_routing_packet.write(writer)?;
7441 if let Some(skimmed_fee) = skimmed_fee_msat {
7442 if holding_cell_skimmed_fees.is_empty() {
7443 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7445 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7446 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7448 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7450 payment_preimage.write(writer)?;
7451 htlc_id.write(writer)?;
7453 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7455 htlc_id.write(writer)?;
7456 err_packet.write(writer)?;
7461 match self.context.resend_order {
7462 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7463 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7466 self.context.monitor_pending_channel_ready.write(writer)?;
7467 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7468 self.context.monitor_pending_commitment_signed.write(writer)?;
7470 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7471 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7472 pending_forward.write(writer)?;
7473 htlc_id.write(writer)?;
7476 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7477 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7478 htlc_source.write(writer)?;
7479 payment_hash.write(writer)?;
7480 fail_reason.write(writer)?;
7483 if self.context.is_outbound() {
7484 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7485 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7486 Some(feerate).write(writer)?;
7488 // As for inbound HTLCs, if the update was only announced and never committed in a
7489 // commitment_signed, drop it.
7490 None::<u32>.write(writer)?;
7492 self.context.holding_cell_update_fee.write(writer)?;
7494 self.context.next_holder_htlc_id.write(writer)?;
7495 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7496 self.context.update_time_counter.write(writer)?;
7497 self.context.feerate_per_kw.write(writer)?;
7499 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7500 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7502 // consider the stale state on reload.
7505 self.context.funding_tx_confirmed_in.write(writer)?;
7506 self.context.funding_tx_confirmation_height.write(writer)?;
7507 self.context.short_channel_id.write(writer)?;
7509 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7510 self.context.holder_dust_limit_satoshis.write(writer)?;
7511 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7513 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7514 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7516 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7517 self.context.holder_htlc_minimum_msat.write(writer)?;
7518 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7520 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7521 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7523 match &self.context.counterparty_forwarding_info {
7526 info.fee_base_msat.write(writer)?;
7527 info.fee_proportional_millionths.write(writer)?;
7528 info.cltv_expiry_delta.write(writer)?;
7530 None => 0u8.write(writer)?
7533 self.context.channel_transaction_parameters.write(writer)?;
7534 self.context.funding_transaction.write(writer)?;
7536 self.context.counterparty_cur_commitment_point.write(writer)?;
7537 self.context.counterparty_prev_commitment_point.write(writer)?;
7538 self.context.counterparty_node_id.write(writer)?;
7540 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7542 self.context.commitment_secrets.write(writer)?;
7544 self.context.channel_update_status.write(writer)?;
7546 #[cfg(any(test, fuzzing))]
7547 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7548 #[cfg(any(test, fuzzing))]
7549 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7550 htlc.write(writer)?;
// If the channel type is something other than only-static-remote-key, then we need to have
// older clients fail to deserialize this channel at all. If the type is
// only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7557 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7558 Some(&self.context.channel_type) } else { None };
7560 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7561 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
// a different percentage of the channel value than 10%, which older versions of LDK used
7563 // to set it to before the percentage was made configurable.
7564 let serialized_holder_selected_reserve =
7565 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7566 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7568 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7569 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7570 let serialized_holder_htlc_max_in_flight =
7571 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7572 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7574 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7575 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7577 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7578 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7579 // we write the high bytes as an option here.
7580 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7582 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7584 write_tlv_fields!(writer, {
7585 (0, self.context.announcement_sigs, option),
7586 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7587 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7588 // them twice, once with their original default values above, and once as an option
7589 // here. On the read side, old versions will simply ignore the odd-type entries here,
// and new versions map the default values to None and allow the TLV entries here to override them.
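// (The duplicated legacy fields use odd TLV types, which readers may ignore if unknown; an
// unknown even type would instead cause deserialization to fail.)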
7592 (1, self.context.minimum_depth, option),
7593 (2, chan_type, option),
7594 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7595 (4, serialized_holder_selected_reserve, option),
7596 (5, self.context.config, required),
7597 (6, serialized_holder_htlc_max_in_flight, option),
7598 (7, self.context.shutdown_scriptpubkey, option),
7599 (8, self.context.blocked_monitor_updates, optional_vec),
7600 (9, self.context.target_closing_feerate_sats_per_kw, option),
7601 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7602 (13, self.context.channel_creation_height, required),
7603 (15, preimages, required_vec),
7604 (17, self.context.announcement_sigs_state, required),
7605 (19, self.context.latest_inbound_scid_alias, option),
7606 (21, self.context.outbound_scid_alias, required),
7607 (23, channel_ready_event_emitted, option),
7608 (25, user_id_high_opt, option),
7609 (27, self.context.channel_keys_id, required),
7610 (28, holder_max_accepted_htlcs, option),
7611 (29, self.context.temporary_channel_id, option),
7612 (31, channel_pending_event_emitted, option),
7613 (35, pending_outbound_skimmed_fees, optional_vec),
7614 (37, holding_cell_skimmed_fees, optional_vec),
7615 (38, self.context.is_batch_funding, option),
7622 const MAX_ALLOC_SIZE: usize = 64*1024;
7623 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7625 ES::Target: EntropySource,
7626 SP::Target: SignerProvider
7628 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7629 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7630 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7632 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7633 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7634 // the low bytes now and the high bytes later.
7635 let user_id_low: u64 = Readable::read(reader)?;
7637 let mut config = Some(LegacyChannelConfig::default());
7639 // Read the old serialization of the ChannelConfig from version 0.0.98.
7640 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7641 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7642 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7643 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7645 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7646 let mut _val: u64 = Readable::read(reader)?;
7649 let channel_id = Readable::read(reader)?;
7650 let channel_state = Readable::read(reader)?;
7651 let channel_value_satoshis = Readable::read(reader)?;
7653 let latest_monitor_update_id = Readable::read(reader)?;
7655 let mut keys_data = None;
// Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7658 // the `channel_keys_id` TLV is present below.
7659 let keys_len: u32 = Readable::read(reader)?;
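// Cap the up-front allocation at MAX_ALLOC_SIZE so a corrupted length prefix can't force a
// huge buffer; the loop below grows the Vec as real bytes actually arrive.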
7660 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7661 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7662 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7663 let mut data = [0; 1024];
7664 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7665 reader.read_exact(read_slice)?;
7666 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7670 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7671 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7672 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7675 let destination_script = Readable::read(reader)?;
7677 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7678 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7679 let value_to_self_msat = Readable::read(reader)?;
7681 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
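// Pre-allocate at most DEFAULT_MAX_HTLCS entries so a bogus count read from disk can't trigger
// a massive allocation; if the real count is larger, the Vec simply reallocates as we push.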
7683 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7684 for _ in 0..pending_inbound_htlc_count {
7685 pending_inbound_htlcs.push(InboundHTLCOutput {
7686 htlc_id: Readable::read(reader)?,
7687 amount_msat: Readable::read(reader)?,
7688 cltv_expiry: Readable::read(reader)?,
7689 payment_hash: Readable::read(reader)?,
7690 state: match <u8 as Readable>::read(reader)? {
7691 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7692 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7693 3 => InboundHTLCState::Committed,
7694 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7695 _ => return Err(DecodeError::InvalidValue),
7700 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7701 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7702 for _ in 0..pending_outbound_htlc_count {
7703 pending_outbound_htlcs.push(OutboundHTLCOutput {
7704 htlc_id: Readable::read(reader)?,
7705 amount_msat: Readable::read(reader)?,
7706 cltv_expiry: Readable::read(reader)?,
7707 payment_hash: Readable::read(reader)?,
7708 source: Readable::read(reader)?,
7709 state: match <u8 as Readable>::read(reader)? {
7710 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7711 1 => OutboundHTLCState::Committed,
7713 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7714 OutboundHTLCState::RemoteRemoved(option.into())
7717 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7718 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7721 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7722 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7724 _ => return Err(DecodeError::InvalidValue),
7726 skimmed_fee_msat: None,
7730 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7731 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7732 for _ in 0..holding_cell_htlc_update_count {
7733 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7734 0 => HTLCUpdateAwaitingACK::AddHTLC {
7735 amount_msat: Readable::read(reader)?,
7736 cltv_expiry: Readable::read(reader)?,
7737 payment_hash: Readable::read(reader)?,
7738 source: Readable::read(reader)?,
7739 onion_routing_packet: Readable::read(reader)?,
7740 skimmed_fee_msat: None,
7742 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7743 payment_preimage: Readable::read(reader)?,
7744 htlc_id: Readable::read(reader)?,
7746 2 => HTLCUpdateAwaitingACK::FailHTLC {
7747 htlc_id: Readable::read(reader)?,
7748 err_packet: Readable::read(reader)?,
7750 _ => return Err(DecodeError::InvalidValue),
7754 let resend_order = match <u8 as Readable>::read(reader)? {
7755 0 => RAACommitmentOrder::CommitmentFirst,
7756 1 => RAACommitmentOrder::RevokeAndACKFirst,
7757 _ => return Err(DecodeError::InvalidValue),
7760 let monitor_pending_channel_ready = Readable::read(reader)?;
7761 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7762 let monitor_pending_commitment_signed = Readable::read(reader)?;
7764 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7765 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7766 for _ in 0..monitor_pending_forwards_count {
7767 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7770 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7771 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7772 for _ in 0..monitor_pending_failures_count {
7773 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7776 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7778 let holding_cell_update_fee = Readable::read(reader)?;
7780 let next_holder_htlc_id = Readable::read(reader)?;
7781 let next_counterparty_htlc_id = Readable::read(reader)?;
7782 let update_time_counter = Readable::read(reader)?;
7783 let feerate_per_kw = Readable::read(reader)?;
7785 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7786 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7788 // consider the stale state on reload.
7789 match <u8 as Readable>::read(reader)? {
7792 let _: u32 = Readable::read(reader)?;
7793 let _: u64 = Readable::read(reader)?;
7794 let _: Signature = Readable::read(reader)?;
7796 _ => return Err(DecodeError::InvalidValue),
7799 let funding_tx_confirmed_in = Readable::read(reader)?;
7800 let funding_tx_confirmation_height = Readable::read(reader)?;
7801 let short_channel_id = Readable::read(reader)?;
7803 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7804 let holder_dust_limit_satoshis = Readable::read(reader)?;
7805 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7806 let mut counterparty_selected_channel_reserve_satoshis = None;
7808 // Read the old serialization from version 0.0.98.
7809 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7811 // Read the 8 bytes of backwards-compatibility data.
7812 let _dummy: u64 = Readable::read(reader)?;
7814 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7815 let holder_htlc_minimum_msat = Readable::read(reader)?;
7816 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7818 let mut minimum_depth = None;
7820 // Read the old serialization from version 0.0.98.
7821 minimum_depth = Some(Readable::read(reader)?);
7823 // Read the 4 bytes of backwards-compatibility data.
7824 let _dummy: u32 = Readable::read(reader)?;
7827 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7829 1 => Some(CounterpartyForwardingInfo {
7830 fee_base_msat: Readable::read(reader)?,
7831 fee_proportional_millionths: Readable::read(reader)?,
7832 cltv_expiry_delta: Readable::read(reader)?,
7834 _ => return Err(DecodeError::InvalidValue),
7837 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7838 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7840 let counterparty_cur_commitment_point = Readable::read(reader)?;
7842 let counterparty_prev_commitment_point = Readable::read(reader)?;
7843 let counterparty_node_id = Readable::read(reader)?;
7845 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7846 let commitment_secrets = Readable::read(reader)?;
7848 let channel_update_status = Readable::read(reader)?;
7850 #[cfg(any(test, fuzzing))]
7851 let mut historical_inbound_htlc_fulfills = HashSet::new();
7852 #[cfg(any(test, fuzzing))]
7854 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7855 for _ in 0..htlc_fulfills_len {
7856 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7860 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7861 Some((feerate, if channel_parameters.is_outbound_from_holder {
7862 FeeUpdateState::Outbound
7864 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7870 let mut announcement_sigs = None;
7871 let mut target_closing_feerate_sats_per_kw = None;
7872 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7873 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7874 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7875 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7876 // only, so we default to that if none was written.
7877 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7878 let mut channel_creation_height = Some(serialized_height);
7879 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7881 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7882 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7883 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7884 let mut latest_inbound_scid_alias = None;
7885 let mut outbound_scid_alias = None;
7886 let mut channel_pending_event_emitted = None;
7887 let mut channel_ready_event_emitted = None;
7889 let mut user_id_high_opt: Option<u64> = None;
7890 let mut channel_keys_id: Option<[u8; 32]> = None;
7891 let mut temporary_channel_id: Option<ChannelId> = None;
7892 let mut holder_max_accepted_htlcs: Option<u16> = None;
7894 let mut blocked_monitor_updates = Some(Vec::new());
7896 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7897 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7899 let mut is_batch_funding: Option<()> = None;
7901 read_tlv_fields!(reader, {
7902 (0, announcement_sigs, option),
7903 (1, minimum_depth, option),
7904 (2, channel_type, option),
7905 (3, counterparty_selected_channel_reserve_satoshis, option),
7906 (4, holder_selected_channel_reserve_satoshis, option),
7907 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7908 (6, holder_max_htlc_value_in_flight_msat, option),
7909 (7, shutdown_scriptpubkey, option),
7910 (8, blocked_monitor_updates, optional_vec),
7911 (9, target_closing_feerate_sats_per_kw, option),
7912 (11, monitor_pending_finalized_fulfills, optional_vec),
7913 (13, channel_creation_height, option),
7914 (15, preimages_opt, optional_vec),
7915 (17, announcement_sigs_state, option),
7916 (19, latest_inbound_scid_alias, option),
7917 (21, outbound_scid_alias, option),
7918 (23, channel_ready_event_emitted, option),
7919 (25, user_id_high_opt, option),
7920 (27, channel_keys_id, option),
7921 (28, holder_max_accepted_htlcs, option),
7922 (29, temporary_channel_id, option),
7923 (31, channel_pending_event_emitted, option),
7924 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7925 (37, holding_cell_skimmed_fees_opt, optional_vec),
7926 (38, is_batch_funding, option),
7929 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7930 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7931 // If we've gotten to the funding stage of the channel, populate the signer with its
7932 // required channel parameters.
7933 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7934 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7935 holder_signer.provide_channel_parameters(&channel_parameters);
7937 (channel_keys_id, holder_signer)
7939 // `keys_data` can be `None` if we had corrupted data.
7940 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7941 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7942 (holder_signer.channel_keys_id(), holder_signer)
7945 if let Some(preimages) = preimages_opt {
7946 let mut iter = preimages.into_iter();
7947 for htlc in pending_outbound_htlcs.iter_mut() {
7949 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7950 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7952 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7953 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7958 // We expect all preimages to be consumed above
7959 if iter.next().is_some() {
7960 return Err(DecodeError::InvalidValue);
7964 let chan_features = channel_type.as_ref().unwrap();
7965 if !chan_features.is_subset(our_supported_features) {
7966 // If the channel was written by a new version and negotiated with features we don't
7967 // understand yet, refuse to read it.
7968 return Err(DecodeError::UnknownRequiredFeature);
7971 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7972 // To account for that, we're proactively setting/overriding the field here.
7973 channel_parameters.channel_type_features = chan_features.clone();
7975 let mut secp_ctx = Secp256k1::new();
7976 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7978 // `user_id` used to be a single u64 value. In order to remain backwards
7979 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7980 // separate u64 values.
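// E.g. a low half of 0x1111_1111_1111_1111 and a high half of 0x2222_2222_2222_2222 recombine
// to 0x2222_2222_2222_2222_1111_1111_1111_1111; if the high TLV is absent (pre-0.0.113 data),
// unwrap_or(0) preserves the original u64 value.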
7981 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7983 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7985 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7986 let mut iter = skimmed_fees.into_iter();
7987 for htlc in pending_outbound_htlcs.iter_mut() {
7988 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7990 // We expect all skimmed fees to be consumed above
7991 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7993 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7994 let mut iter = skimmed_fees.into_iter();
7995 for htlc in holding_cell_htlc_updates.iter_mut() {
7996 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7997 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8000 // We expect all skimmed fees to be consumed above
8001 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8005 context: ChannelContext {
8008 config: config.unwrap(),
8012 // Note that we don't care about serializing handshake limits as we only ever serialize
8013 // channel data after the handshake has completed.
8014 inbound_handshake_limits_override: None,
8017 temporary_channel_id,
8019 announcement_sigs_state: announcement_sigs_state.unwrap(),
8021 channel_value_satoshis,
8023 latest_monitor_update_id,
8025 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8026 shutdown_scriptpubkey,
8029 cur_holder_commitment_transaction_number,
8030 cur_holder_commitment_point: None,
8031 prev_holder_commitment_secret: None,
8032 cur_counterparty_commitment_transaction_number,
8035 holder_max_accepted_htlcs,
8036 pending_inbound_htlcs,
8037 pending_outbound_htlcs,
8038 holding_cell_htlc_updates,
8042 monitor_pending_channel_ready,
8043 monitor_pending_revoke_and_ack,
8044 monitor_pending_commitment_signed,
8045 monitor_pending_forwards,
8046 monitor_pending_failures,
8047 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8049 signer_pending_commitment_update: false,
8050 signer_pending_revoke_and_ack: false,
8051 signer_pending_funding: false,
8052 signer_pending_channel_ready: false,
8053 signer_pending_commitment_point: true,
8054 signer_pending_released_secret: true,
8057 holding_cell_update_fee,
8058 next_holder_htlc_id,
8059 next_counterparty_htlc_id,
8060 update_time_counter,
8063 #[cfg(debug_assertions)]
8064 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8065 #[cfg(debug_assertions)]
8066 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8068 last_sent_closing_fee: None,
8069 pending_counterparty_closing_signed: None,
8070 closing_fee_limits: None,
8071 target_closing_feerate_sats_per_kw,
8073 funding_tx_confirmed_in,
8074 funding_tx_confirmation_height,
8076 channel_creation_height: channel_creation_height.unwrap(),
8078 counterparty_dust_limit_satoshis,
8079 holder_dust_limit_satoshis,
8080 counterparty_max_htlc_value_in_flight_msat,
8081 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8082 counterparty_selected_channel_reserve_satoshis,
8083 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8084 counterparty_htlc_minimum_msat,
8085 holder_htlc_minimum_msat,
8086 counterparty_max_accepted_htlcs,
8089 counterparty_forwarding_info,
8091 channel_transaction_parameters: channel_parameters,
8092 funding_transaction,
8095 counterparty_cur_commitment_point,
8096 counterparty_prev_commitment_point,
8097 counterparty_node_id,
8099 counterparty_shutdown_scriptpubkey,
8103 channel_update_status,
8104 closing_signed_in_flight: false,
8108 #[cfg(any(test, fuzzing))]
8109 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8110 #[cfg(any(test, fuzzing))]
8111 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8113 workaround_lnd_bug_4006: None,
8114 sent_message_awaiting_response: None,
8116 latest_inbound_scid_alias,
// Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8118 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8120 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8121 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8123 #[cfg(any(test, fuzzing))]
8124 historical_inbound_htlc_fulfills,
8126 channel_type: channel_type.unwrap(),
8129 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8138 use bitcoin::blockdata::constants::ChainHash;
8139 use bitcoin::blockdata::script::{Script, Builder};
8140 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8141 use bitcoin::blockdata::opcodes;
8142 use bitcoin::network::constants::Network;
8144 use crate::ln::PaymentHash;
8145 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8146 use crate::ln::channel::InitFeatures;
8147 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, commit_tx_fee_msat};
8148 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8149 use crate::ln::features::ChannelTypeFeatures;
8150 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8151 use crate::ln::script::ShutdownScript;
8152 use crate::ln::chan_utils;
8153 use crate::ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
8154 use crate::chain::BestBlock;
8155 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8156 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8157 use crate::chain::transaction::OutPoint;
8158 use crate::routing::router::Path;
8159 use crate::util::config::UserConfig;
8160 use crate::util::errors::APIError;
8161 use crate::util::test_utils;
8162 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8163 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8164 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8165 use bitcoin::secp256k1::{SecretKey,PublicKey};
8166 use bitcoin::hashes::sha256::Hash as Sha256;
8167 use bitcoin::hashes::Hash;
8168 use bitcoin::hash_types::WPubkeyHash;
8169 use bitcoin::PackedLockTime;
8170 use bitcoin::util::address::WitnessVersion;
8171 use crate::prelude::*;
8173 struct TestFeeEstimator {
8176 impl FeeEstimator for TestFeeEstimator {
8177 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8183 fn test_max_funding_satoshis_no_wumbo() {
8184 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8185 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8186 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8190 fn test_no_fee_check_overflow() {
8191 // Previously, calling `check_remote_fee` with a fee of 0xffffffff would overflow in
8192 // arithmetic, causing a panic with debug assertions enabled.
8193 let fee_est = TestFeeEstimator { fee_est: 42 };
8194 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8195 assert!(Channel::<&TestKeysInterface>::check_remote_fee(
8196 &ChannelTypeFeatures::only_static_remote_key(), &bounded_fee_estimator,
8197 u32::max_value(), None, &&test_utils::TestLogger::new()).is_err());
8201 signer: InMemorySigner,
8204 impl EntropySource for Keys {
8205 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8208 impl SignerProvider for Keys {
8209 type Signer = InMemorySigner;
8211 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8212 self.signer.channel_keys_id()
8215 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::Signer {
8219 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
8221 fn get_destination_script(&self) -> Result<Script, ()> {
8222 let secp_ctx = Secp256k1::signing_only();
8223 let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8224 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8225 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&channel_monitor_claim_key_hash[..]).into_script())
8228 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8229 let secp_ctx = Secp256k1::signing_only();
8230 let channel_close_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8231 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8235 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8236 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8237 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode(hex).unwrap()[..]).unwrap())
8241 fn upfront_shutdown_script_incompatibility() {
8242 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8243 let non_v0_segwit_shutdown_script =
8244 ShutdownScript::new_witness_program(WitnessVersion::V16, &[0, 40]).unwrap();
8246 let seed = [42; 32];
8247 let network = Network::Testnet;
8248 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8249 keys_provider.expect(OnGetShutdownScriptpubkey {
8250 returns: non_v0_segwit_shutdown_script.clone(),
8253 let secp_ctx = Secp256k1::new();
8254 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8255 let config = UserConfig::default();
8256 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42) {
8257 Err(APIError::IncompatibleShutdownScript { script }) => {
8258 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8260 Err(e) => panic!("Unexpected error: {:?}", e),
8261 Ok(_) => panic!("Expected error"),
8265 // Check that, during channel creation, we use the same feerate in the open channel message
8266 // as we do in the Channel object creation itself.
8268 fn test_open_channel_msg_fee() {
8269 let original_fee = 253;
8270 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8271 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8272 let secp_ctx = Secp256k1::new();
8273 let seed = [42; 32];
8274 let network = Network::Testnet;
8275 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8277 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8278 let config = UserConfig::default();
8279 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8281 // Now change the fee so we can check that the fee in the open_channel message is the
8282 // same as the old fee.
8283 fee_est.fee_est = 500;
8284 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
8285 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8289 fn test_holder_vs_counterparty_dust_limit() {
8290 // Test that when calculating the local and remote commitment transaction fees, the correct
8291 // dust limits are used.
8292 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8293 let secp_ctx = Secp256k1::new();
8294 let seed = [42; 32];
8295 let network = Network::Testnet;
8296 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8297 let logger = test_utils::TestLogger::new();
8298 let best_block = BestBlock::from_network(network);
8300 // Go through the flow of opening a channel between two nodes, making sure
8301 // they have different dust limits.
8303 // Create Node A's channel pointing to Node B's pubkey
8304 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8305 let config = UserConfig::default();
8306 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8308 // Create Node B's channel by receiving Node A's open_channel message
8309 // Make sure A's dust limit is as we expect.
8310 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
8311 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8312 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8314 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8315 let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
8316 accept_channel_msg.dust_limit_satoshis = 546;
8317 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8318 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8320 // Node A --> Node B: funding created
8321 let output_script = node_a_chan.context.get_funding_redeemscript();
8322 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8323 value: 10000000, script_pubkey: output_script.clone(),
8325 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8326 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8327 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8329 // Node B --> Node A: funding signed
8330 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8332 // Put some inbound and outbound HTLCs in A's channel.
8333 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
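// Rough sketch of where 11_092_000 msat comes from, assuming the BOLT 3 non-anchor HTLC-success /
// HTLC-timeout weights of 703 / 663 at the 15_000 sat/kW feerate used above:
//   A's effective dust limits: 1560 + 15_000 * 703 / 1000 = 12_105 sat (success)
//                              1560 + 15_000 * 663 / 1000 = 11_505 sat (timeout)
//   B's effective dust limits:  546 + 15_000 * 703 / 1000 = 11_091 sat (success)
//                               546 + 15_000 * 663 / 1000 = 10_491 sat (timeout)
// 11_092 sat is below both of A's thresholds but above both of B's.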
8334 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8336 amount_msat: htlc_amount_msat,
8337 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
8338 cltv_expiry: 300000000,
8339 state: InboundHTLCState::Committed,
8342 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8344 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8345 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
8346 cltv_expiry: 200000000,
8347 state: OutboundHTLCState::Committed,
8348 source: HTLCSource::OutboundRoute {
8349 path: Path { hops: Vec::new(), blinded_tail: None },
8350 session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8351 first_hop_htlc_msat: 548,
8352 payment_id: PaymentId([42; 32]),
8354 skimmed_fee_msat: None,
8357 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8358 // the dust limit check.
8359 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8360 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8361 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8362 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8364 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8365 // of the HTLCs are seen to be above the dust limit.
8366 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8367 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8368 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8369 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8370 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
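// For reference, a rough sketch of the fee values compared above, assuming the BOLT 3 non-anchor
// commitment weights (724 base + 172 per non-dust HTLC) at 15_000 sat/kW:
//   0 HTLCs: 15_000 * 724 / 1000             = 10_860 sat
//   3 HTLCs: 15_000 * (724 + 3 * 172) / 1000 = 18_600 sat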
8374 fn test_timeout_vs_success_htlc_dust_limit() {
8375 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8376 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8377 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8378 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
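// Rough rule under test: an HTLC is dust on a commitment transaction if
//   amount_sat < dust_limit_sat + feerate_per_kw * weight / 1000
// where the weight is the HTLC-timeout tx weight for HTLCs we offered and the HTLC-success tx
// weight for HTLCs we received (assuming non-anchor channels). Each candidate below is placed
// one sat on the intended side of one of those thresholds.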
8379 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8380 let secp_ctx = Secp256k1::new();
8381 let seed = [42; 32];
8382 let network = Network::Testnet;
8383 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8385 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8386 let config = UserConfig::default();
8387 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8389 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8390 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8392 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8393 // counted as dust when it shouldn't be.
8394 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8395 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8396 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8397 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8399 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8400 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8401 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8402 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8403 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8405 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8407 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8408 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8409 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8410 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8411 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8413 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8414 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8415 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8416 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8417 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8421 fn channel_reestablish_no_updates() {
8422 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8423 let logger = test_utils::TestLogger::new();
8424 let secp_ctx = Secp256k1::new();
8425 let seed = [42; 32];
8426 let network = Network::Testnet;
8427 let best_block = BestBlock::from_network(network);
8428 let chain_hash = ChainHash::using_genesis_block(network);
8429 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8431 // Go through the flow of opening a channel between two nodes.
8433 // Create Node A's channel pointing to Node B's pubkey
8434 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8435 let config = UserConfig::default();
8436 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8438 // Create Node B's channel by receiving Node A's open_channel message
8439 let open_channel_msg = node_a_chan.get_open_channel(chain_hash).unwrap();
8440 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8441 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8443 // Node B --> Node A: accept channel
8444 let accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
8445 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8447 // Node A --> Node B: funding created
8448 let output_script = node_a_chan.context.get_funding_redeemscript();
8449 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8450 value: 10000000, script_pubkey: output_script.clone(),
8452 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8453 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8454 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8456 // Node B --> Node A: funding signed
8457 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8459 // Now disconnect the two nodes and check that the commitment point in
8460 // Node B's channel_reestablish message is sane.
8461 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8462 let msg = node_b_chan.get_channel_reestablish(&&logger);
8463 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8464 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8465 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8467 // Check that the commitment point in Node A's channel_reestablish message is sane.
8469 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8470 let msg = node_a_chan.get_channel_reestablish(&&logger);
8471 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8472 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8473 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
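// Roughly, per BOLT 2: a `next_local_commitment_number` (now `next_commitment_number`) of 1 means
// only the initial commitment transaction has been signed so far, a `next_remote_commitment_number`
// (now `next_revocation_number`) of 0 means no revocations have been received, and in that case
// `your_last_per_commitment_secret` is sent as all zeroes.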
8477 fn test_configured_holder_max_htlc_value_in_flight() {
8478 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8479 let logger = test_utils::TestLogger::new();
8480 let secp_ctx = Secp256k1::new();
8481 let seed = [42; 32];
8482 let network = Network::Testnet;
8483 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8484 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8485 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8487 let mut config_2_percent = UserConfig::default();
8488 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8489 let mut config_99_percent = UserConfig::default();
8490 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8491 let mut config_0_percent = UserConfig::default();
8492 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8493 let mut config_101_percent = UserConfig::default();
8494 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
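// The assertions below sketch the clamping behaviour: a configured percentage within [1, 100] is
// applied directly, values below 1 are bumped up to 1% of the channel value, and values above 100
// are capped at 100%.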
8496 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8497 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8498 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8499 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42).unwrap();
8500 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8501 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8503 // Test with the upper bound - 1 of valid values (99%).
8504 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42).unwrap();
8505 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8506 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8508 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
8510 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8511 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8512 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8513 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8514 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8515 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8517 // Test with the upper bound - 1 of valid values (99%).
8518 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8519 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8520 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8522 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8523 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8524 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42).unwrap();
8525 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8526 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8528 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8529 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8531 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42).unwrap();
8532 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8533 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8535 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8536 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8537 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8538 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8539 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8541 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8542 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8544 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8545 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8546 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8550 fn test_configured_holder_selected_channel_reserve_satoshis() {
8552 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8553 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8554 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8556 // Test with valid but unreasonably high channel reserves
8557 // Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves
8558 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8559 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8561 // Test with calculated channel reserve less than lower bound
8562 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8563 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
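// Sketch of the floor case above: 100_000 sat * 0.00002 is only 2 sat, so the holder-selected
// reserve should be bumped up to `MIN_THEIR_CHAN_RESERVE_SATOSHIS` by the max() in
// `test_self_and_counterparty_channel_reserve` below.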
8565 // Test with invalid channel reserves since the sum of both is greater than or equal to the channel value
8567 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8568 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8571 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8572 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8573 let logger = test_utils::TestLogger::new();
8574 let secp_ctx = Secp256k1::new();
8575 let seed = [42; 32];
8576 let network = Network::Testnet;
8577 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8578 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8579 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8582 let mut outbound_node_config = UserConfig::default();
8583 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8584 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
8586 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8587 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8589 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
8590 let mut inbound_node_config = UserConfig::default();
8591 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8593 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8594 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8596 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8598 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8599 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8601 // Channel negotiation fails since the requested reserves sum to at least the full channel value
8602 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8603 assert!(result.is_err());
8608 fn channel_update() {
8609 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8610 let logger = test_utils::TestLogger::new();
8611 let secp_ctx = Secp256k1::new();
8612 let seed = [42; 32];
8613 let network = Network::Testnet;
8614 let best_block = BestBlock::from_network(network);
8615 let chain_hash = ChainHash::using_genesis_block(network);
8616 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8618 // Create Node A's channel pointing to Node B's pubkey
8619 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8620 let config = UserConfig::default();
8621 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();
8623 // Create Node B's channel by receiving Node A's open_channel message
8624 // Make sure A's dust limit is as we expect.
8625 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
8626 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8627 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8629 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8630 let mut accept_channel_msg = node_b_chan.accept_inbound_channel().unwrap();
8631 accept_channel_msg.dust_limit_satoshis = 546;
8632 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8633 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8635 // Node A --> Node B: funding created
8636 let output_script = node_a_chan.context.get_funding_redeemscript();
8637 let tx = Transaction { version: 1, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8638 value: 10000000, script_pubkey: output_script.clone(),
8640 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8641 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8642 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8644 // Node B --> Node A: funding signed
8645 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8647 // Make sure that receiving a channel update will update the Channel as expected.
8648 let update = ChannelUpdate {
8649 contents: UnsignedChannelUpdate {
8651 short_channel_id: 0,
8654 cltv_expiry_delta: 100,
8655 htlc_minimum_msat: 5,
8656 htlc_maximum_msat: MAX_VALUE_MSAT,
8658 fee_proportional_millionths: 11,
8659 excess_data: Vec::new(),
8661 signature: Signature::from(unsafe { FFISignature::new() })
8663 assert!(node_a_chan.channel_update(&update).unwrap());
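// `channel_update` returns Ok(true) here because the update changed the stored counterparty
// forwarding info; re-applying the identical update at the end of this test should return
// Ok(false).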
8665 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8666 // change our official htlc_minimum_msat.
8667 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8668 match node_a_chan.context.counterparty_forwarding_info() {
8670 assert_eq!(info.cltv_expiry_delta, 100);
8671 assert_eq!(info.fee_base_msat, 110);
8672 assert_eq!(info.fee_proportional_millionths, 11);
8674 None => panic!("expected counterparty forwarding info to be Some")
8677 assert!(!node_a_chan.channel_update(&update).unwrap());
8680 #[cfg(feature = "_test_vectors")]
8682 fn outbound_commitment_test() {
8683 use bitcoin::util::sighash;
8684 use bitcoin::consensus::encode::serialize;
8685 use bitcoin::blockdata::transaction::EcdsaSighashType;
8686 use bitcoin::hashes::hex::FromHex;
8687 use bitcoin::hash_types::Txid;
8688 use bitcoin::secp256k1::Message;
8689 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, EcdsaChannelSigner};
8690 use crate::ln::PaymentPreimage;
8691 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8692 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8693 use crate::util::logger::Logger;
8694 use crate::sync::Arc;
8696 // Test vectors from BOLT 3 Appendices C and F (anchors):
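// Layout of the `test_commitment!` / `test_commitment_with_anchors!` invocations below (see the
// macro definitions further down): counterparty commitment signature hex, holder commitment
// signature hex, expected serialized commitment tx hex, then a block of per-HTLC entries of the
// form { output index, counterparty HTLC sig hex, holder HTLC sig hex, expected HTLC tx hex }.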
8697 let feeest = TestFeeEstimator{fee_est: 15000};
8698 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8699 let secp_ctx = Secp256k1::new();
8701 let mut signer = InMemorySigner::new(
8703 SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8704 SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8705 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8706 SecretKey::from_slice(&hex::decode("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8707 SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8709 // These aren't set in the test vectors:
8710 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8716 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8717 hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8718 let keys_provider = Keys { signer: signer.clone() };
8720 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8721 let mut config = UserConfig::default();
8722 config.channel_handshake_config.announced_channel = false;
8723 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42).unwrap(); // Nothing uses their network key in this test
8724 chan.context.holder_dust_limit_satoshis = 546;
8725 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8727 let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8729 let counterparty_pubkeys = ChannelPublicKeys {
8730 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8731 revocation_basepoint: PublicKey::from_slice(&hex::decode("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap(),
8732 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8733 delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8734 htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
8736 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8737 CounterpartyChannelTransactionParameters {
8738 pubkeys: counterparty_pubkeys.clone(),
8739 selected_contest_delay: 144
8741 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8742 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8744 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8745 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8747 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8748 hex::decode("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8750 assert_eq!(counterparty_pubkeys.htlc_basepoint.serialize()[..],
8751 hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8753 // We can't just use build_next_holder_transaction_keys here as the per_commitment_secret is not
8754 // derived from a commitment_seed, so instead we copy it here and call
8755 // build_commitment_transaction.
8756 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8757 let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8758 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8759 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8760 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8762 macro_rules! test_commitment {
8763 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8764 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8765 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8769 macro_rules! test_commitment_with_anchors {
8770 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8771 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8772 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8776 macro_rules! test_commitment_common {
8777 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8778 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8780 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8781 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8783 let htlcs = commitment_stats.htlcs_included.drain(..)
8784 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8786 (commitment_stats.tx, htlcs)
8788 let trusted_tx = commitment_tx.trust();
8789 let unsigned_tx = trusted_tx.built_transaction();
8790 let redeemscript = chan.context.get_funding_redeemscript();
8791 let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
8792 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8793 log_trace!(logger, "unsigned_tx = {}", hex::encode(serialize(&unsigned_tx.transaction)));
8794 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8796 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8797 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8798 let mut counterparty_htlc_sigs = Vec::new();
8799 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8801 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8802 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8803 counterparty_htlc_sigs.push(remote_signature);
8805 assert_eq!(htlcs.len(), per_htlc.len());
8807 let holder_commitment_tx = HolderCommitmentTransaction::new(
8808 commitment_tx.clone(),
8809 counterparty_signature,
8810 counterparty_htlc_sigs,
8811 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8812 chan.context.counterparty_funding_pubkey()
8814 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8815 assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8817 let funding_redeemscript = chan.context.get_funding_redeemscript();
8818 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8819 assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
8821 // ((htlc, counterparty_sig), (index, holder_sig))
8822 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8825 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8826 let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8828 let ref htlc = htlcs[$htlc_idx];
8829 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8830 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8831 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8832 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8833 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8834 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8835 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).is_ok(), "verify counterparty htlc sig");
8837 let mut preimage: Option<PaymentPreimage> = None;
8840 let out = PaymentHash(Sha256::hash(&[i; 32]).into_inner());
8841 if out == htlc.payment_hash {
8842 preimage = Some(PaymentPreimage([i; 32]));
8846 assert!(preimage.is_some());
8849 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8850 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8851 channel_derivation_parameters: ChannelDerivationParameters {
8852 value_satoshis: chan.context.channel_value_satoshis,
8853 keys_id: chan.context.channel_keys_id,
8854 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8856 commitment_txid: trusted_tx.txid(),
8857 per_commitment_number: trusted_tx.commitment_number(),
8858 per_commitment_point: trusted_tx.per_commitment_point(),
8859 feerate_per_kw: trusted_tx.feerate_per_kw(),
8861 preimage: preimage.clone(),
8862 counterparty_sig: *htlc_counterparty_sig,
8863 }, &secp_ctx).unwrap();
8864 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8865 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8867 let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
8868 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8869 let trusted_tx = holder_commitment_tx.trust();
8870 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8871 log_trace!(logger, "htlc_tx = {}", hex::encode(serialize(&htlc_tx)));
8872 assert_eq!(serialize(&htlc_tx)[..], hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
8874 assert!(htlc_counterparty_sig_iter.next().is_none());
8878 // anchors: simple commitment tx with no HTLCs and single anchor
8879 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8880 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8881 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8883 // simple commitment tx with no HTLCs
8884 chan.context.value_to_self_msat = 7000000000;
8886 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8887 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8888 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8890 // anchors: simple commitment tx with no HTLCs
8891 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8892 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8893 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8895 chan.context.pending_inbound_htlcs.push({
8896 let mut out = InboundHTLCOutput{
8898 amount_msat: 1000000,
8900 payment_hash: PaymentHash([0; 32]),
8901 state: InboundHTLCState::Committed,
8903 out.payment_hash.0 = Sha256::hash(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).into_inner();
8906 chan.context.pending_inbound_htlcs.push({
8907 let mut out = InboundHTLCOutput{
8909 amount_msat: 2000000,
8911 payment_hash: PaymentHash([0; 32]),
8912 state: InboundHTLCState::Committed,
8914 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
8917 chan.context.pending_outbound_htlcs.push({
8918 let mut out = OutboundHTLCOutput{
8920 amount_msat: 2000000,
8922 payment_hash: PaymentHash([0; 32]),
8923 state: OutboundHTLCState::Committed,
8924 source: HTLCSource::dummy(),
8925 skimmed_fee_msat: None,
8927 out.payment_hash.0 = Sha256::hash(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).into_inner();
8930 chan.context.pending_outbound_htlcs.push({
8931 let mut out = OutboundHTLCOutput{
8933 amount_msat: 3000000,
8935 payment_hash: PaymentHash([0; 32]),
8936 state: OutboundHTLCState::Committed,
8937 source: HTLCSource::dummy(),
8938 skimmed_fee_msat: None,
8940 out.payment_hash.0 = Sha256::hash(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).into_inner();
8943 chan.context.pending_inbound_htlcs.push({
8944 let mut out = InboundHTLCOutput{
8946 amount_msat: 4000000,
8948 payment_hash: PaymentHash([0; 32]),
8949 state: InboundHTLCState::Committed,
8951 out.payment_hash.0 = Sha256::hash(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).into_inner();
8955 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8956 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8957 chan.context.feerate_per_kw = 0;
8959 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8960 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8961 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8964 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8965 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8966 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8969 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8970 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8971 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8974 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8975 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8976 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8979 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8980 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8981 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8984 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8985 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8986 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8989 // commitment tx with seven outputs untrimmed (maximum feerate)
8990 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8991 chan.context.feerate_per_kw = 647;
8993 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8994 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8995 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8998 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8999 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9000 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9003 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9004 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9005 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9008 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9009 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9010 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9013 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9014 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9015 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9018 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9019 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9020 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9023 // commitment tx with six outputs untrimmed (minimum feerate)
9024 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9025 chan.context.feerate_per_kw = 648;
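// Hedged aside (not part of the upstream BOLT 3 vectors): the drop from seven to
// six outputs happens because a received HTLC is trimmed once its value, minus the
// fee of an HTLC-success transaction at the current feerate, falls below the dust
// limit. A minimal sketch of that threshold check, assuming the non-anchors
// HTLC-success weight of 703 WU from BOLT 3 and the stock 546 sat dust limit:
{
    // Illustrative constants only; the real test uses chan_utils helpers.
    const HTLC_SUCCESS_TX_WEIGHT: u64 = 703; // non-anchors second-stage success tx weight
    let dust_limit_sat = 546u64;
    let is_trimmed = |feerate_per_kw: u64, amount_sat: u64| {
        amount_sat < dust_limit_sat + feerate_per_kw * HTLC_SUCCESS_TX_WEIGHT / 1000
    };
    // The 1_000 sat received HTLC just survives at feerate 647 (threshold 1_000)
    // and is trimmed at 648 (threshold 1_001), which is what this case exercises.
    debug_assert!(!is_trimmed(647, 1_000));
    debug_assert!(is_trimmed(648, 1_000));
}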
9027 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9028 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9029 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9032 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9033 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9034 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9037 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9038 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9039 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9042 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9043 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9044 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9047 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9048 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9049 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9052 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9053 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9054 chan.context.feerate_per_kw = 645;
9055 chan.context.holder_dust_limit_satoshis = 1001;
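// Hedged note: with the zero-fee-HTLC anchors channel type exercised by
// test_commitment_with_anchors!, second-stage HTLC transactions pay no fee, so an
// HTLC is trimmed simply when its value is below the dust limit. Raising the holder
// dust limit to 1_001 sat therefore trims exactly the 1_000 sat HTLC, leaving the
// same six untrimmed outputs as the non-anchors case above, plus the two anchors.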
9057 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9058 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9059 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9062 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9063 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9064 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9067 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9068 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9069 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9072 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9073 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9074 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9077 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9078 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9079 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9082 // commitment tx with six outputs untrimmed (maximum feerate)
9083 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9084 chan.context.feerate_per_kw = 2069;
9085 chan.context.holder_dust_limit_satoshis = 546;
9087 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9088 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9089 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9092 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9093 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9094 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9097 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9098 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9099 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9102 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9103 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9104 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9107 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9108 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9109 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9112 // commitment tx with five outputs untrimmed (minimum feerate)
9113 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9114 chan.context.feerate_per_kw = 2070;
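// Same trim rule as sketched above: at feerate 2069 the 2_000 sat received HTLC sits
// exactly at the threshold (546 + 2069 * 703 / 1000 = 2_000, so it is kept), while at
// 2070 the threshold becomes 2_001 and it is trimmed, leaving five outputs.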
9116 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9117 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9118 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9121 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9122 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9123 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9126 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9127 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9128 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9131 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9132 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9133 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9136 // commitment tx with five outputs untrimmed (maximum feerate)
9137 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9138 chan.context.feerate_per_kw = 2194;
9140 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9141 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9142 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9145 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9146 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9147 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9150 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9151 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9152 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9155 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9156 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9157 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9160 // commitment tx with four outputs untrimmed (minimum feerate)
9161 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9162 chan.context.feerate_per_kw = 2195;
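// The next boundary involves an offered HTLC, so the HTLC-timeout weight of 663 WU
// applies instead: 546 + 2194 * 663 / 1000 = 2_000 keeps the 2_000 sat offered HTLC
// at feerate 2194, while 546 + 2195 * 663 / 1000 = 2_001 trims it, leaving four outputs.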
9164 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9165 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9166 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9169 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9170 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9171 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9174 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9175 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9176 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9179 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9180 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9181 chan.context.feerate_per_kw = 2185;
9182 chan.context.holder_dust_limit_satoshis = 2001;
9183 let cached_channel_type = chan.context.channel_type.clone();
9184 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9186 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9187 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9188 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9191 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9192 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9193 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9196 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9197 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9198 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9201 // commitment tx with four outputs untrimmed (maximum feerate)
9202 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9203 chan.context.feerate_per_kw = 3702;
9204 chan.context.holder_dust_limit_satoshis = 546;
9205 chan.context.channel_type = cached_channel_type.clone();
9207 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9208 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9209 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9212 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9213 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9214 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9217 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9218 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9219 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9222 // commitment tx with three outputs untrimmed (minimum feerate)
9223 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9224 chan.context.feerate_per_kw = 3703;
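// As above for the 3_000 sat offered HTLC: 546 + 3702 * 663 / 1000 = 3_000 keeps it
// at feerate 3702, while 546 + 3703 * 663 / 1000 = 3_001 trims it, leaving three outputs.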
9226 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9227 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9228 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9231 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9232 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9233 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9236 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9237 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9238 chan.context.feerate_per_kw = 3687;
9239 chan.context.holder_dust_limit_satoshis = 3001;
9240 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9242 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9243 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9244 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9247 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9248 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9249 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9252 // commitment tx with three outputs untrimmed (maximum feerate)
9253 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9254 chan.context.feerate_per_kw = 4914;
9255 chan.context.holder_dust_limit_satoshis = 546;
9256 chan.context.channel_type = cached_channel_type.clone();
9258 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9259 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9260 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9263 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9264 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9265 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9268 // commitment tx with two outputs untrimmed (minimum feerate)
9269 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9270 chan.context.feerate_per_kw = 4915;
9271 chan.context.holder_dust_limit_satoshis = 546;
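// Final HTLC boundary: the 4_000 sat received HTLC is kept at feerate 4914
// (546 + 4914 * 703 / 1000 = 4_000) and trimmed at 4915 (threshold 4_001), leaving
// only the to_local and to_remote outputs.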
9273 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9274 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9275 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9277 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9278 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9279 chan.context.feerate_per_kw = 4894;
9280 chan.context.holder_dust_limit_satoshis = 4001;
9281 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9283 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9284 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9285 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9287 // commitment tx with two outputs untrimmed (maximum feerate)
9288 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9289 chan.context.feerate_per_kw = 9651180;
9290 chan.context.holder_dust_limit_satoshis = 546;
9291 chan.context.channel_type = cached_channel_type.clone();
9293 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9294 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9295 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9297 // commitment tx with one output untrimmed (minimum feerate)
9298 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9299 chan.context.feerate_per_kw = 9651181;
9301 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9302 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9303 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9305 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9306 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9307 chan.context.feerate_per_kw = 6216010;
9308 chan.context.holder_dust_limit_satoshis = 4001;
9309 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9311 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9312 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9313 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9315 // commitment tx with fee greater than funder amount
9316 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9317 chan.context.feerate_per_kw = 9651936;
9318 chan.context.holder_dust_limit_satoshis = 546;
9319 chan.context.channel_type = cached_channel_type;
9321 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9322 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9323 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9325 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9326 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9327 chan.context.feerate_per_kw = 253;
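// Hedged note: the two offered HTLCs below differ by 1 msat (5_000_001 vs 5_000_000),
// so they round to the same 5_000 sat output value with the same script; BOLT 3 then
// orders such identical outputs by CLTV expiry, which is the tie-break this case exercises.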
9328 chan.context.pending_inbound_htlcs.clear();
9329 chan.context.pending_inbound_htlcs.push({
9330 let mut out = InboundHTLCOutput{
9332 amount_msat: 2000000,
9334 payment_hash: PaymentHash([0; 32]),
9335 state: InboundHTLCState::Committed,
9337 out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
9340 chan.context.pending_outbound_htlcs.clear();
9341 chan.context.pending_outbound_htlcs.push({
9342 let mut out = OutboundHTLCOutput{
9344 amount_msat: 5000001,
9346 payment_hash: PaymentHash([0; 32]),
9347 state: OutboundHTLCState::Committed,
9348 source: HTLCSource::dummy(),
9349 skimmed_fee_msat: None,
9351 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
9354 chan.context.pending_outbound_htlcs.push({
9355 let mut out = OutboundHTLCOutput{
9357 amount_msat: 5000000,
9359 payment_hash: PaymentHash([0; 32]),
9360 state: OutboundHTLCState::Committed,
9361 source: HTLCSource::dummy(),
9362 skimmed_fee_msat: None,
9364 out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
		test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		                 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
		                  "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
		                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		                  { 1,
		                  "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
		                  "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
		                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },

		                  { 2,
		                  "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
		                  "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
		                  "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
		} );

		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
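		// With the anchors channel type the commitment format changes: two 330-sat anchor outputs
		// are added, HTLC outputs gain a 1-block CSV, and the counterparty's HTLC signatures use
		// SIGHASH_SINGLE|ANYONECANPAY (the trailing 0x83 sighash byte visible in the witnesses
		// below), hence the separate _with_anchors vectors.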
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 2,
		                  "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		                  "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		                  { 3,
		                  "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		                  "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

		                  { 4,
		                  "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		                  "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}
	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:
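		// The secret for index I is derived by starting from the seed and, for each of the 48
		// index bits from highest to lowest, flipping that bit of the running value and hashing
		// it with SHA256 whenever the corresponding bit of I is set.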
		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&hex::decode("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&hex::decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		           hex::decode("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		           hex::decode("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		           hex::decode("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}
	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
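		// Both derivations below commit to the per-commitment point: the derived local key is
		// basepoint + SHA256(per_commitment_point || basepoint) * G, while the revocation key
		// combines the revocation basepoint and the per-commitment point using two such hashes.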
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&hex::decode("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], hex::decode("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], hex::decode("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_public_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
		           hex::decode("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
		           SecretKey::from_slice(&hex::decode("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(chan_utils::derive_public_revocation_key(&secp_ctx, &per_commitment_point, &base_point).serialize()[..],
		           hex::decode("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
		           SecretKey::from_slice(&hex::decode("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}
	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
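		// The negotiated type also requires `static_remote_key`, since the anchors channel type
		// builds on it (see `expected_channel_type` below).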
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
		// need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}
	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
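		// Bit 12 is the required `option_static_remotekey` feature and bit 20 the required
		// `option_anchor_outputs` ("simple anchors") feature.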
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}
	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());
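		// Sanity check: the raw features above only use bits known to LDK, so the rejections
		// below come from the channel type checks rather than from unknown-feature handling.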

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network)).unwrap();

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message().unwrap();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}
	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;
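		// A batch-funded channel stays in `ChannelState::WaitingForBatch` after `funding_signed`
		// and neither sends `channel_ready` nor broadcasts the funding transaction until
		// `set_batch_ready` is called, which the assertions below walk through.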

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000, 100000, 42, &config, 0, 42,
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg.unwrap(), 7, &config, 0, &&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg.unwrap(),
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: PackedLockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true /* is_batch_funding */, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider, chain_hash, &config, &best_block, &&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		assert!(node_a_chan.check_get_channel_ready(0, &&logger).is_some());