1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
// licenses.
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
264 /// There are a few "states" and then a number of flags which can be applied:
265 /// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
266 /// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
267 /// move on to `ChannelReady`.
268 /// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
269 /// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
270 /// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
272 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
273 OurInitSent = 1 << 0,
274 /// Implies we have received their `open_channel`/`accept_channel` message
275 TheirInitSent = 1 << 1,
276 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
277 /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
278 /// upon receipt of `funding_created`, so simply skip this state.
280 /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
281 /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
282 /// and our counterparty consider the funding transaction confirmed.
284 /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
285 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
286 TheirChannelReady = 1 << 4,
287 /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
288 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
289 OurChannelReady = 1 << 5,
291 /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
292 /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
294 PeerDisconnected = 1 << 7,
295 /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
296 /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
297 /// sending any outbound messages until they've managed to finish.
298 MonitorUpdateInProgress = 1 << 8,
299 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
300 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
301 /// messages as then we will be unable to determine which HTLCs they included in their
302 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
304 /// Flag is set on `ChannelReady`.
305 AwaitingRemoteRevoke = 1 << 9,
306 /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
307 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
308 /// to respond with our own shutdown message when possible.
309 RemoteShutdownSent = 1 << 10,
310 /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
311 /// point, we may not add any new HTLCs to the channel.
312 LocalShutdownSent = 1 << 11,
313 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
314 /// to drop us, but we store this anyway.
315 ShutdownComplete = 4096,
316 /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
317 /// broadcasting of the funding transaction is being held until all channels in the batch
318 /// have received funding_signed and have their monitors persisted.
319 WaitingForBatch = 1 << 13,
321 const BOTH_SIDES_SHUTDOWN_MASK: u32 =
322 ChannelState::LocalShutdownSent as u32 |
323 ChannelState::RemoteShutdownSent as u32;
324 const MULTI_STATE_FLAGS: u32 =
325 BOTH_SIDES_SHUTDOWN_MASK |
326 ChannelState::PeerDisconnected as u32 |
327 ChannelState::MonitorUpdateInProgress as u32;
328 const STATE_FLAGS: u32 =
330 ChannelState::TheirChannelReady as u32 |
331 ChannelState::OurChannelReady as u32 |
332 ChannelState::AwaitingRemoteRevoke as u32 |
333 ChannelState::WaitingForBatch as u32;
/// The first commitment transaction number; commitment numbers count *down*
/// from here (48-bit space, per BOLT #3's obscured commitment numbering).
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// The default maximum number of HTLCs we will allow in-flight at once.
pub const DEFAULT_MAX_HTLCS: u16 = 50;
339 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
340 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
341 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
342 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
346 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
348 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
350 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1; // 2^24 - 1
/// Total bitcoin supply in satoshis (21 million BTC at 100,000,000 sat/BTC).
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
388 /// Used to return a simple Error back to ChannelManager. Will get converted to a
389 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
390 /// channel_id in ChannelManager.
391 pub(super) enum ChannelError {
397 impl fmt::Debug for ChannelError {
398 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
400 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
401 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
402 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
407 impl fmt::Display for ChannelError {
408 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
410 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
411 &ChannelError::Warn(ref e) => write!(f, "{}", e),
412 &ChannelError::Close(ref e) => write!(f, "{}", e),
417 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
419 pub peer_id: Option<PublicKey>,
420 pub channel_id: Option<ChannelId>,
423 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
424 fn log(&self, mut record: Record) {
425 record.peer_id = self.peer_id;
426 record.channel_id = self.channel_id;
427 self.logger.log(record)
431 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
432 where L::Target: Logger {
433 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
434 where S::Target: SignerProvider
438 peer_id: Some(context.counterparty_node_id),
439 channel_id: Some(context.channel_id),
444 macro_rules! secp_check {
445 ($res: expr, $err: expr) => {
448 Err(_) => return Err(ChannelError::Close($err)),
453 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
454 /// our counterparty or not. However, we don't want to announce updates right away to avoid
455 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
456 /// our channel_update message and track the current state here.
457 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
458 #[derive(Clone, Copy, PartialEq)]
459 pub(super) enum ChannelUpdateStatus {
460 /// We've announced the channel as enabled and are connected to our peer.
462 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
464 /// Our channel is live again, but we haven't announced the channel as enabled yet.
466 /// We've announced the channel as disabled.
470 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
472 pub enum AnnouncementSigsState {
473 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
474 /// we sent the last `AnnouncementSignatures`.
476 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
477 /// This state never appears on disk - instead we write `NotSent`.
479 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
480 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
481 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
482 /// they send back a `RevokeAndACK`.
483 /// This state never appears on disk - instead we write `NotSent`.
485 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
486 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
490 /// An enum indicating whether the local or remote side offered a given HTLC.
496 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
499 pending_htlcs_value_msat: u64,
500 on_counterparty_tx_dust_exposure_msat: u64,
501 on_holder_tx_dust_exposure_msat: u64,
502 holding_cell_msat: u64,
503 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
506 /// An enum gathering stats on commitment transaction, either local or remote.
507 struct CommitmentStats<'a> {
508 tx: CommitmentTransaction, // the transaction info
509 feerate_per_kw: u32, // the feerate included to build the transaction
510 total_fee_sat: u64, // the total fee included in the transaction
511 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
512 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
513 local_balance_msat: u64, // local balance before fees but considering dust limits
514 remote_balance_msat: u64, // remote balance before fees but considering dust limits
515 preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
518 /// Used when calculating whether we or the remote can afford an additional HTLC.
519 struct HTLCCandidate {
521 origin: HTLCInitiator,
525 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
533 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
535 enum UpdateFulfillFetch {
537 monitor_update: ChannelMonitorUpdate,
538 htlc_value_msat: u64,
539 msg: Option<msgs::UpdateFulfillHTLC>,
544 /// The return type of get_update_fulfill_htlc_and_commit.
545 pub enum UpdateFulfillCommitFetch {
546 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
547 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
548 /// previously placed in the holding cell (and has since been removed).
550 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
551 monitor_update: ChannelMonitorUpdate,
552 /// The value of the HTLC which was claimed, in msat.
553 htlc_value_msat: u64,
555 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
556 /// or has been forgotten (presumably previously claimed).
560 /// The return value of `monitor_updating_restored`
561 pub(super) struct MonitorRestoreUpdates {
562 pub raa: Option<msgs::RevokeAndACK>,
563 pub commitment_update: Option<msgs::CommitmentUpdate>,
564 pub order: RAACommitmentOrder,
565 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
566 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
567 pub finalized_claimed_htlcs: Vec<HTLCSource>,
568 pub funding_broadcastable: Option<Transaction>,
569 pub channel_ready: Option<msgs::ChannelReady>,
570 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
573 /// The return value of `signer_maybe_unblocked`
575 pub(super) struct SignerResumeUpdates {
576 pub commitment_update: Option<msgs::CommitmentUpdate>,
577 pub funding_signed: Option<msgs::FundingSigned>,
578 pub funding_created: Option<msgs::FundingCreated>,
579 pub channel_ready: Option<msgs::ChannelReady>,
582 /// The return value of `channel_reestablish`
583 pub(super) struct ReestablishResponses {
584 pub channel_ready: Option<msgs::ChannelReady>,
585 pub raa: Option<msgs::RevokeAndACK>,
586 pub commitment_update: Option<msgs::CommitmentUpdate>,
587 pub order: RAACommitmentOrder,
588 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
589 pub shutdown_msg: Option<msgs::Shutdown>,
592 /// The result of a shutdown that should be handled.
594 pub(crate) struct ShutdownResult {
595 /// A channel monitor update to apply.
596 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
597 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
598 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
599 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
600 /// propagated to the remainder of the batch.
601 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
/// If the majority of the channels funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
/// Number of blocks needed for an output from a coinbase transaction to be spendable
/// (Bitcoin's consensus coinbase maturity rule).
pub(crate) const COINBASE_MATURITY: u32 = 100;
666 struct PendingChannelMonitorUpdate {
667 update: ChannelMonitorUpdate,
670 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
671 (0, update, required),
674 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
675 /// its variants containing an appropriate channel struct.
676 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
677 UnfundedOutboundV1(OutboundV1Channel<SP>),
678 UnfundedInboundV1(InboundV1Channel<SP>),
682 impl<'a, SP: Deref> ChannelPhase<SP> where
683 SP::Target: SignerProvider,
684 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
686 pub fn context(&'a self) -> &'a ChannelContext<SP> {
688 ChannelPhase::Funded(chan) => &chan.context,
689 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
690 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
694 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
696 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
697 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
698 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
703 /// Contains all state common to unfunded inbound/outbound channels.
704 pub(super) struct UnfundedChannelContext {
705 /// A counter tracking how many ticks have elapsed since this unfunded channel was
706 /// created. If this unfunded channel reaches peer has yet to respond after reaching
707 /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
709 /// This is so that we don't keep channels around that haven't progressed to a funded state
710 /// in a timely manner.
711 unfunded_channel_age_ticks: usize,
714 impl UnfundedChannelContext {
715 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
716 /// having reached the unfunded channel age limit.
718 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
719 pub fn should_expire_unfunded_channel(&mut self) -> bool {
720 self.unfunded_channel_age_ticks += 1;
721 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
725 /// Contains everything about the channel including state, and various flags.
726 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
727 config: LegacyChannelConfig,
729 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
730 // constructed using it. The second element in the tuple corresponds to the number of ticks that
731 // have elapsed since the update occurred.
732 prev_config: Option<(ChannelConfig, usize)>,
734 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
738 /// The current channel ID.
739 channel_id: ChannelId,
740 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
741 /// Will be `None` for channels created prior to 0.0.115.
742 temporary_channel_id: Option<ChannelId>,
745 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
746 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
748 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
749 // Note that a number of our tests were written prior to the behavior here which retransmits
750 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
752 #[cfg(any(test, feature = "_test_utils"))]
753 pub(crate) announcement_sigs_state: AnnouncementSigsState,
754 #[cfg(not(any(test, feature = "_test_utils")))]
755 announcement_sigs_state: AnnouncementSigsState,
757 secp_ctx: Secp256k1<secp256k1::All>,
758 channel_value_satoshis: u64,
760 latest_monitor_update_id: u64,
762 holder_signer: ChannelSignerType<SP>,
763 shutdown_scriptpubkey: Option<ShutdownScript>,
764 destination_script: ScriptBuf,
766 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
767 // generation start at 0 and count up...this simplifies some parts of implementation at the
768 // cost of others, but should really just be changed.
770 cur_holder_commitment_transaction_number: u64,
771 cur_counterparty_commitment_transaction_number: u64,
772 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
773 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
774 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
775 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
777 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
778 /// need to ensure we resend them in the order we originally generated them. Note that because
779 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
780 /// sufficient to simply set this to the opposite of any message we are generating as we
781 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
782 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
784 resend_order: RAACommitmentOrder,
786 monitor_pending_channel_ready: bool,
787 monitor_pending_revoke_and_ack: bool,
788 monitor_pending_commitment_signed: bool,
790 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
791 // responsible for some of the HTLCs here or not - we don't know whether the update in question
792 // completed or not. We currently ignore these fields entirely when force-closing a channel,
793 // but need to handle this somehow or we run the risk of losing HTLCs!
794 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
795 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
796 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
798 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
799 /// but our signer (initially) refused to give us a signature, we should retry at some point in
800 /// the future when the signer indicates it may have a signature for us.
802 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
803 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
804 signer_pending_commitment_update: bool,
805 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
806 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
807 /// outbound or inbound.
808 signer_pending_funding: bool,
810 // pending_update_fee is filled when sending and receiving update_fee.
812 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
813 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
814 // generating new commitment transactions with exactly the same criteria as inbound/outbound
815 // HTLCs with similar state.
816 pending_update_fee: Option<(u32, FeeUpdateState)>,
817 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
818 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
819 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
820 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
821 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
822 holding_cell_update_fee: Option<u32>,
823 next_holder_htlc_id: u64,
824 next_counterparty_htlc_id: u64,
827 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
828 /// when the channel is updated in ways which may impact the `channel_update` message or when a
829 /// new block is received, ensuring it's always at least moderately close to the current real
831 update_time_counter: u32,
833 #[cfg(debug_assertions)]
834 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
835 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
836 #[cfg(debug_assertions)]
837 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
838 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
840 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
841 target_closing_feerate_sats_per_kw: Option<u32>,
843 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
844 /// update, we need to delay processing it until later. We do that here by simply storing the
845 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
846 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
848 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
849 /// transaction. These are set once we reach `closing_negotiation_ready`.
851 pub(crate) closing_fee_limits: Option<(u64, u64)>,
853 closing_fee_limits: Option<(u64, u64)>,
855 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
856 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
857 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
858 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
859 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
861 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
862 /// until we see a `commitment_signed` before doing so.
864 /// We don't bother to persist this - we anticipate this state won't last longer than a few
865 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
866 expecting_peer_commitment_signed: bool,
868 /// The hash of the block in which the funding transaction was included.
869 funding_tx_confirmed_in: Option<BlockHash>,
870 funding_tx_confirmation_height: u32,
871 short_channel_id: Option<u64>,
872 /// Either the height at which this channel was created or the height at which it was last
873 /// serialized if it was serialized by versions prior to 0.0.103.
874 /// We use this to close if funding is never broadcasted.
875 channel_creation_height: u32,
877 counterparty_dust_limit_satoshis: u64,
880 pub(super) holder_dust_limit_satoshis: u64,
882 holder_dust_limit_satoshis: u64,
885 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
887 counterparty_max_htlc_value_in_flight_msat: u64,
890 pub(super) holder_max_htlc_value_in_flight_msat: u64,
892 holder_max_htlc_value_in_flight_msat: u64,
894 /// minimum channel reserve for self to maintain - set by them.
895 counterparty_selected_channel_reserve_satoshis: Option<u64>,
898 pub(super) holder_selected_channel_reserve_satoshis: u64,
900 holder_selected_channel_reserve_satoshis: u64,
902 counterparty_htlc_minimum_msat: u64,
903 holder_htlc_minimum_msat: u64,
905 pub counterparty_max_accepted_htlcs: u16,
907 counterparty_max_accepted_htlcs: u16,
908 holder_max_accepted_htlcs: u16,
909 minimum_depth: Option<u32>,
911 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
913 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
914 funding_transaction: Option<Transaction>,
915 is_batch_funding: Option<()>,
917 counterparty_cur_commitment_point: Option<PublicKey>,
918 counterparty_prev_commitment_point: Option<PublicKey>,
919 counterparty_node_id: PublicKey,
921 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
923 commitment_secrets: CounterpartyCommitmentSecrets,
925 channel_update_status: ChannelUpdateStatus,
926 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
927 /// not complete within a single timer tick (one minute), we should force-close the channel.
928 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
930 /// Note that this field is reset to false on deserialization to give us a chance to connect to
931 /// our peer and start the closing_signed negotiation fresh.
932 closing_signed_in_flight: bool,
934 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
935 /// This can be used to rebroadcast the channel_announcement message later.
936 announcement_sigs: Option<(Signature, Signature)>,
938 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
939 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
940 // be, by comparing the cached values to the fee of the tranaction generated by
941 // `build_commitment_transaction`.
942 #[cfg(any(test, fuzzing))]
943 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
944 #[cfg(any(test, fuzzing))]
945 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
947 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
948 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
949 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
950 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
951 /// message until we receive a channel_reestablish.
953 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
954 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
956 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
957 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
958 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
959 /// unblock the state machine.
961 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
962 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
963 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
965 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
966 /// [`msgs::RevokeAndACK`] message from the counterparty.
967 sent_message_awaiting_response: Option<usize>,
969 #[cfg(any(test, fuzzing))]
970 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
971 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
972 // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
973 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
974 // is fine, but as a sanity check in our failure to generate the second claim, we check here
975 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
976 historical_inbound_htlc_fulfills: HashSet<u64>,
978 /// This channel's type, as negotiated during channel open
979 channel_type: ChannelTypeFeatures,
981 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
982 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
983 // the channel's funding UTXO.
985 // We also use this when sending our peer a channel_update that isn't to be broadcasted
986 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
987 // associated channel mapping.
989 // We only bother storing the most recent SCID alias at any time, though our counterparty has
990 // to store all of them.
991 latest_inbound_scid_alias: Option<u64>,
993 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
994 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
995 // don't currently support node id aliases and eventually privacy should be provided with
996 // blinded paths instead of simple scid+node_id aliases.
997 outbound_scid_alias: u64,
999 // We track whether we already emitted a `ChannelPending` event.
1000 channel_pending_event_emitted: bool,
1002 // We track whether we already emitted a `ChannelReady` event.
1003 channel_ready_event_emitted: bool,
1005 /// The unique identifier used to re-derive the private key material for the channel through
1006 /// [`SignerProvider::derive_channel_signer`].
1007 channel_keys_id: [u8; 32],
1009 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1010 /// store it here and only release it to the `ChannelManager` once it asks for it.
1011 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1014 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1015 /// Allowed in any state (including after shutdown)
1016 pub fn get_update_time_counter(&self) -> u32 {
1017 self.update_time_counter
1020 pub fn get_latest_monitor_update_id(&self) -> u64 {
1021 self.latest_monitor_update_id
1024 pub fn should_announce(&self) -> bool {
1025 self.config.announced_channel
1028 pub fn is_outbound(&self) -> bool {
1029 self.channel_transaction_parameters.is_outbound_from_holder
1032 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1033 /// Allowed in any state (including after shutdown)
1034 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1035 self.config.options.forwarding_fee_base_msat
1038 /// Returns true if we've ever received a message from the remote end for this Channel
1039 pub fn have_received_message(&self) -> bool {
1040 self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
1043 /// Returns true if this channel is fully established and not known to be closing.
1044 /// Allowed in any state (including after shutdown)
1045 pub fn is_usable(&self) -> bool {
1046 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
1047 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
1050 /// shutdown state returns the state of the channel in its various stages of shutdown
1051 pub fn shutdown_state(&self) -> ChannelShutdownState {
1052 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
1053 return ChannelShutdownState::ShutdownComplete;
1055 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
1056 return ChannelShutdownState::ShutdownInitiated;
1058 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
1059 return ChannelShutdownState::ResolvingHTLCs;
1061 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1062 return ChannelShutdownState::NegotiatingClosingFee;
1064 return ChannelShutdownState::NotShuttingDown;
1067 fn closing_negotiation_ready(&self) -> bool {
1068 self.pending_inbound_htlcs.is_empty() &&
1069 self.pending_outbound_htlcs.is_empty() &&
1070 self.pending_update_fee.is_none() &&
1071 self.channel_state &
1072 (BOTH_SIDES_SHUTDOWN_MASK |
1073 ChannelState::AwaitingRemoteRevoke as u32 |
1074 ChannelState::PeerDisconnected as u32 |
1075 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1078 /// Returns true if this channel is currently available for use. This is a superset of
1079 /// is_usable() and considers things like the channel being temporarily disabled.
1080 /// Allowed in any state (including after shutdown)
1081 pub fn is_live(&self) -> bool {
1082 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1085 // Public utilities:
1087 pub fn channel_id(&self) -> ChannelId {
1091 // Return the `temporary_channel_id` used during channel establishment.
1093 // Will return `None` for channels created prior to LDK version 0.0.115.
1094 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1095 self.temporary_channel_id
1098 pub fn minimum_depth(&self) -> Option<u32> {
1102 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1103 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1104 pub fn get_user_id(&self) -> u128 {
1108 /// Gets the channel's type
1109 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1113 /// Gets the channel's `short_channel_id`.
1115 /// Will return `None` if the channel hasn't been confirmed yet.
1116 pub fn get_short_channel_id(&self) -> Option<u64> {
1117 self.short_channel_id
1120 /// Allowed in any state (including after shutdown)
1121 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1122 self.latest_inbound_scid_alias
1125 /// Allowed in any state (including after shutdown)
1126 pub fn outbound_scid_alias(&self) -> u64 {
1127 self.outbound_scid_alias
1130 /// Returns the holder signer for this channel.
1132 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1133 return &self.holder_signer
1136 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1137 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1138 /// or prior to any channel actions during `Channel` initialization.
1139 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1140 debug_assert_eq!(self.outbound_scid_alias, 0);
1141 self.outbound_scid_alias = outbound_scid_alias;
1144 /// Returns the funding_txo we either got from our peer, or were given by
1145 /// get_funding_created.
1146 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1147 self.channel_transaction_parameters.funding_outpoint
1150 /// Returns the height in which our funding transaction was confirmed.
1151 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1152 let conf_height = self.funding_tx_confirmation_height;
1153 if conf_height > 0 {
1160 /// Returns the block hash in which our funding transaction was confirmed.
1161 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1162 self.funding_tx_confirmed_in
1165 /// Returns the current number of confirmations on the funding transaction.
1166 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1167 if self.funding_tx_confirmation_height == 0 {
1168 // We either haven't seen any confirmation yet, or observed a reorg.
1172 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1175 fn get_holder_selected_contest_delay(&self) -> u16 {
1176 self.channel_transaction_parameters.holder_selected_contest_delay
1179 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1180 &self.channel_transaction_parameters.holder_pubkeys
1183 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1184 self.channel_transaction_parameters.counterparty_parameters
1185 .as_ref().map(|params| params.selected_contest_delay)
1188 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1189 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1192 /// Allowed in any state (including after shutdown)
1193 pub fn get_counterparty_node_id(&self) -> PublicKey {
1194 self.counterparty_node_id
1197 /// Allowed in any state (including after shutdown)
1198 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1199 self.holder_htlc_minimum_msat
1202 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1203 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1204 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1207 /// Allowed in any state (including after shutdown)
1208 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1210 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1211 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1212 // channel might have been used to route very small values (either by honest users or as DoS).
1213 self.channel_value_satoshis * 1000 * 9 / 10,
1215 self.counterparty_max_htlc_value_in_flight_msat
1219 /// Allowed in any state (including after shutdown)
1220 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1221 self.counterparty_htlc_minimum_msat
1224 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1225 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1226 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1229 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1230 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1231 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1233 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1234 party_max_htlc_value_in_flight_msat
1239 pub fn get_value_satoshis(&self) -> u64 {
1240 self.channel_value_satoshis
1243 pub fn get_fee_proportional_millionths(&self) -> u32 {
1244 self.config.options.forwarding_fee_proportional_millionths
1247 pub fn get_cltv_expiry_delta(&self) -> u16 {
1248 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1251 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1252 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1253 where F::Target: FeeEstimator
1255 match self.config.options.max_dust_htlc_exposure {
1256 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1257 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1258 ConfirmationTarget::OnChainSweep) as u64;
1259 feerate_per_kw.saturating_mul(multiplier)
1261 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1265 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1266 pub fn prev_config(&self) -> Option<ChannelConfig> {
1267 self.prev_config.map(|prev_config| prev_config.0)
1270 // Checks whether we should emit a `ChannelPending` event.
1271 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1272 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1275 // Returns whether we already emitted a `ChannelPending` event.
1276 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1277 self.channel_pending_event_emitted
1280 // Remembers that we already emitted a `ChannelPending` event.
1281 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1282 self.channel_pending_event_emitted = true;
1285 // Checks whether we should emit a `ChannelReady` event.
1286 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1287 self.is_usable() && !self.channel_ready_event_emitted
1290 // Remembers that we already emitted a `ChannelReady` event.
1291 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1292 self.channel_ready_event_emitted = true;
1295 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1296 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1297 /// no longer be considered when forwarding HTLCs.
1298 pub fn maybe_expire_prev_config(&mut self) {
1299 if self.prev_config.is_none() {
1302 let prev_config = self.prev_config.as_mut().unwrap();
1304 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1305 self.prev_config = None;
1309 /// Returns the current [`ChannelConfig`] applied to the channel.
1310 pub fn config(&self) -> ChannelConfig {
1314 /// Updates the channel's config. A bool is returned indicating whether the config update
1315 /// applied resulted in a new ChannelUpdate message.
1316 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1317 let did_channel_update =
1318 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1319 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1320 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1321 if did_channel_update {
1322 self.prev_config = Some((self.config.options, 0));
1323 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1324 // policy change to propagate throughout the network.
1325 self.update_time_counter += 1;
1327 self.config.options = *config;
1331 /// Returns true if funding_signed was sent/received and the
1332 /// funding transaction has been broadcast if necessary.
1333 pub fn is_funding_broadcast(&self) -> bool {
1334 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1335 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1338 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1339 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1340 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1341 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1342 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1344 /// @local is used only to convert relevant internal structures which refer to remote vs local
1345 /// to decide value of outputs and direction of HTLCs.
1346 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1347 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1348 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1349 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1350 /// which peer generated this transaction and "to whom" this transaction flows.
1352 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1353 where L::Target: Logger
1355 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1356 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1357 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1359 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1360 let mut remote_htlc_total_msat = 0;
1361 let mut local_htlc_total_msat = 0;
1362 let mut value_to_self_msat_offset = 0;
1364 let mut feerate_per_kw = self.feerate_per_kw;
1365 if let Some((feerate, update_state)) = self.pending_update_fee {
1366 if match update_state {
1367 // Note that these match the inclusion criteria when scanning
1368 // pending_inbound_htlcs below.
1369 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1370 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1371 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1373 feerate_per_kw = feerate;
1377 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1378 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1379 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1381 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1383 macro_rules! get_htlc_in_commitment {
1384 ($htlc: expr, $offered: expr) => {
1385 HTLCOutputInCommitment {
1387 amount_msat: $htlc.amount_msat,
1388 cltv_expiry: $htlc.cltv_expiry,
1389 payment_hash: $htlc.payment_hash,
1390 transaction_output_index: None
1395 macro_rules! add_htlc_output {
1396 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1397 if $outbound == local { // "offered HTLC output"
1398 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1399 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1402 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1404 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1405 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1406 included_non_dust_htlcs.push((htlc_in_tx, $source));
1408 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1409 included_dust_htlcs.push((htlc_in_tx, $source));
1412 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1413 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1416 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1418 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1419 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1420 included_non_dust_htlcs.push((htlc_in_tx, $source));
1422 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1423 included_dust_htlcs.push((htlc_in_tx, $source));
1429 for ref htlc in self.pending_inbound_htlcs.iter() {
1430 let (include, state_name) = match htlc.state {
1431 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1432 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1433 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1434 InboundHTLCState::Committed => (true, "Committed"),
1435 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1439 add_htlc_output!(htlc, false, None, state_name);
1440 remote_htlc_total_msat += htlc.amount_msat;
1442 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1444 &InboundHTLCState::LocalRemoved(ref reason) => {
1445 if generated_by_local {
1446 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1447 value_to_self_msat_offset += htlc.amount_msat as i64;
1456 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1458 for ref htlc in self.pending_outbound_htlcs.iter() {
1459 let (include, state_name) = match htlc.state {
1460 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1461 OutboundHTLCState::Committed => (true, "Committed"),
1462 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1463 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1464 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1467 let preimage_opt = match htlc.state {
1468 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1469 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1470 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1474 if let Some(preimage) = preimage_opt {
1475 preimages.push(preimage);
1479 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1480 local_htlc_total_msat += htlc.amount_msat;
1482 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1484 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1485 value_to_self_msat_offset -= htlc.amount_msat as i64;
1487 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1488 if !generated_by_local {
1489 value_to_self_msat_offset -= htlc.amount_msat as i64;
1497 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1498 assert!(value_to_self_msat >= 0);
1499 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1500 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1501 // "violate" their reserve value by couting those against it. Thus, we have to convert
1502 // everything to i64 before subtracting as otherwise we can overflow.
1503 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1504 assert!(value_to_remote_msat >= 0);
1506 #[cfg(debug_assertions)]
1508 // Make sure that the to_self/to_remote is always either past the appropriate
1509 // channel_reserve *or* it is making progress towards it.
1510 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1511 self.holder_max_commitment_tx_output.lock().unwrap()
1513 self.counterparty_max_commitment_tx_output.lock().unwrap()
1515 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1516 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1517 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1518 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1521 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1522 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1523 let (value_to_self, value_to_remote) = if self.is_outbound() {
1524 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1526 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1529 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1530 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1531 let (funding_pubkey_a, funding_pubkey_b) = if local {
1532 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1534 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1537 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1538 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1543 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1544 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1549 let num_nondust_htlcs = included_non_dust_htlcs.len();
1551 let channel_parameters =
1552 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1553 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1554 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1561 &mut included_non_dust_htlcs,
1564 let mut htlcs_included = included_non_dust_htlcs;
1565 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1566 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1567 htlcs_included.append(&mut included_dust_htlcs);
1569 // For the stats, trimmed-to-0 the value in msats accordingly
1570 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1571 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1579 local_balance_msat: value_to_self_msat as u64,
1580 remote_balance_msat: value_to_remote_msat as u64,
1586 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1587 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1588 /// our counterparty!)
1589 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1590 /// TODO Some magic rust shit to compile-time check this?
1591 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1592 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1593 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1594 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1595 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1597 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1601 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1602 /// will sign and send to our counterparty.
1603 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1604 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1605 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1606 //may see payments to it!
1607 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1608 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1609 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1611 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1614 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1615 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1616 /// Panics if called before accept_channel/InboundV1Channel::new
1617 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1618 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1621 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1622 &self.get_counterparty_pubkeys().funding_pubkey
1625 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1629 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1630 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1631 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1632 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1633 // more dust balance if the feerate increases when we have several HTLCs pending
1634 // which are near the dust limit.
1635 let mut feerate_per_kw = self.feerate_per_kw;
1636 // If there's a pending update fee, use it to ensure we aren't under-estimating
1637 // potential feerate updates coming soon.
1638 if let Some((feerate, _)) = self.pending_update_fee {
1639 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1641 if let Some(feerate) = outbound_feerate_update {
1642 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1644 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1647 /// Get forwarding information for the counterparty.
1648 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1649 self.counterparty_forwarding_info.clone()
/// Returns a HTLCStats about inbound pending htlcs
fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
	// NOTE(review): `context` is bound (presumably as `&self`) on a line elided from this
	// excerpt — confirm against the full source.
	let mut stats = HTLCStats {
		pending_htlcs: context.pending_inbound_htlcs.len() as u32,
		pending_htlcs_value_msat: 0,
		on_counterparty_tx_dust_exposure_msat: 0,
		on_holder_tx_dust_exposure_msat: 0,
		holding_cell_msat: 0,
		on_holder_tx_holding_cell_htlcs_count: 0,
	// Pad the raw dust limits by the second-stage HTLC claim-tx fee at a buffered
	// (pessimistic) feerate. NOTE(review): the anchor-channel arm of this `if` is elided
	// here — presumably `(0, 0)` since anchor HTLC txs are zero-fee; confirm.
	let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
		(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
			dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
	// Inbound HTLCs are claimed via HTLC-timeout on the counterparty's commitment and via
	// HTLC-success on ours, hence the asymmetric padding below.
	let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
	let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
	for ref htlc in context.pending_inbound_htlcs.iter() {
		stats.pending_htlcs_value_msat += htlc.amount_msat;
		// Counts as dust on the counterparty's commitment transaction.
		if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
			stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
		// Counts as dust on our commitment transaction.
		if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
			stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
/// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
	// NOTE(review): `context` is bound (presumably as `&self`) on a line elided from this
	// excerpt — confirm against the full source.
	let mut stats = HTLCStats {
		pending_htlcs: context.pending_outbound_htlcs.len() as u32,
		pending_htlcs_value_msat: 0,
		on_counterparty_tx_dust_exposure_msat: 0,
		on_holder_tx_dust_exposure_msat: 0,
		holding_cell_msat: 0,
		on_holder_tx_holding_cell_htlcs_count: 0,
	// Pad the raw dust limits by the second-stage HTLC claim-tx fee at a buffered feerate.
	// NOTE(review): the anchor-channel arm of this `if` is elided — presumably `(0, 0)`;
	// confirm.
	let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
		(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
			dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
	// Outbound HTLCs are claimed via HTLC-success on the counterparty's commitment and via
	// HTLC-timeout on ours — the mirror image of the inbound stats above.
	let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
	let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
	for ref htlc in context.pending_outbound_htlcs.iter() {
		stats.pending_htlcs_value_msat += htlc.amount_msat;
		if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
			stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
		if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
			stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
	// Holding-cell adds also count: they will appear on the next commitment we sign.
	for update in context.holding_cell_htlc_updates.iter() {
		if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
			stats.pending_htlcs += 1;
			stats.pending_htlcs_value_msat += amount_msat;
			stats.holding_cell_msat += amount_msat;
			if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
				stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
			if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
				stats.on_holder_tx_dust_exposure_msat += amount_msat;
			stats.on_holder_tx_holding_cell_htlcs_count += 1;
/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
/// corner case properly.
pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
-> AvailableBalances
where F::Target: FeeEstimator
	let context = &self;
	// Note that we have to handle overflow due to the above case.
	let inbound_stats = context.get_inbound_pending_htlc_stats(None);
	let outbound_stats = context.get_outbound_pending_htlc_stats(None);
	// Our claimable balance: what we'd have after all pending outbound HTLCs fail, plus any
	// inbound HTLCs we've already fulfilled but which still await the counterparty's RAA.
	let mut balance_msat = context.value_to_self_msat;
	for ref htlc in context.pending_inbound_htlcs.iter() {
		if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
			balance_msat += htlc.amount_msat;
	balance_msat -= outbound_stats.pending_htlcs_value_msat;
	// Outbound capacity: our balance minus in-flight outbound HTLCs minus the channel
	// reserve the counterparty requires us to maintain (saturating at zero).
	let outbound_capacity_msat = context.value_to_self_msat
		.saturating_sub(outbound_stats.pending_htlcs_value_msat)
			context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
	let mut available_capacity_msat = outbound_capacity_msat;
	// On anchor channels both anchor outputs are paid for by the funder.
	let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
	if context.is_outbound() {
		// We should mind channel commit tx fee when computing how much of the available capacity
		// can be used in the next htlc. Mirrors the logic in send_htlc.
		// The fee depends on whether the amount we will be sending is above dust or not,
		// and the answer will in turn change the amount itself — making it a circular
		// This complicates the computation around dust-values, up to the one-htlc-value.
		let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
		if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
		// Reserve commitment-tx fee for the next HTLC in both the above-dust and below-dust
		// cases, plus one extra HTLC as a fee-spike buffer on non-anchor channels.
		let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
		let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
		let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
		let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
		if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
		// We will first subtract the fee as if we were above-dust. Then, if the resulting
		// value ends up being below dust, we have this fee available again. In that case,
		// match the value to right-below-dust.
		let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
			max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
		if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
			let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
			debug_assert!(one_htlc_difference_msat != 0);
			capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
			capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
			available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
			available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
		// If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
		// sending a new HTLC won't reduce their balance below our reserve threshold.
		let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
		if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
		let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
		let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
		let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
		let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
			.saturating_sub(inbound_stats.pending_htlcs_value_msat);
		if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
			// If another HTLC's fee would reduce the remote's balance below the reserve limit
			// we've selected for them, we can only send dust HTLCs.
			available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
	let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
	// If we get close to our maximum dust exposure, we end up in a situation where we can send
	// between zero and the remaining dust exposure limit remaining OR above the dust limit.
	// Because we cannot express this as a simple min/max, we prefer to tell the user they can
	// send above the dust limit (as the router can always overpay to meet the dust limit).
	let mut remaining_msat_below_dust_exposure_limit = None;
	let mut dust_exposure_dust_limit_msat = 0;
	let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
	let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
		let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
		(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
	// Would one more just-below-dust HTLC push us over the counterparty-tx dust cap?
	let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
	if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
		remaining_msat_below_dust_exposure_limit =
			Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
		dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
	// Same check for our own commitment transaction's dust exposure.
	let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
	if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
		remaining_msat_below_dust_exposure_limit = Some(cmp::min(
			remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
			max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
		dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
	if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
		if available_capacity_msat < dust_exposure_dust_limit_msat {
			available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
			next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
	// Also cap by the counterparty's max-HTLC-value-in-flight and max-accepted-HTLC limits.
	available_capacity_msat = cmp::min(available_capacity_msat,
		context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
	if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
		available_capacity_msat = 0;
		// Inbound capacity: channel value minus our balance, minus in-flight inbound HTLCs,
		// minus the reserve we require of the counterparty (floored at 0 by the cmp::max).
		inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
			- context.value_to_self_msat as i64
			- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
			- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
		outbound_capacity_msat,
		next_outbound_htlc_limit_msat: available_capacity_msat,
		next_outbound_htlc_minimum_msat,
1885 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1886 let context = &self;
1887 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
/// number of pending HTLCs that are on track to be in our next commitment tx.
/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
/// `fee_spike_buffer_htlc` is `Some`.
/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
/// Dust HTLCs are excluded.
fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
	let context = &self;
	// Only the funder pays commitment-tx fees, so this is only meaningful outbound.
	assert!(context.is_outbound());
	// Pad the raw dust limits by the second-stage claim-tx fee at the current feerate.
	// NOTE(review): the anchor-channel arm of this `if` is elided — presumably `(0, 0)`;
	// confirm.
	let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
	let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
	let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
	let mut addl_htlcs = 0;
	if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		// (elided: `match htlc.origin {`) — the candidate counts only if non-dust. We offer
		// outbound HTLCs (timeout path on our tx); they offer inbound ones (success path).
		HTLCInitiator::LocalOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
		HTLCInitiator::RemoteOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
	// Count the non-dust HTLCs that will appear on OUR next commitment transaction.
	let mut included_htlcs = 0;
	for ref htlc in context.pending_inbound_htlcs.iter() {
		if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
		// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
		// transaction including this HTLC if it times out before they RAA.
		included_htlcs += 1;
	for ref htlc in context.pending_outbound_htlcs.iter() {
		if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
			OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
			OutboundHTLCState::Committed => included_htlcs += 1,
			OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
			// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
			// transaction won't be generated until they send us their next RAA, which will mean
			// dropping any HTLCs in this state.
	// Holding-cell adds will also land on our next commitment if non-dust.
	for htlc in context.holding_cell_htlc_updates.iter() {
		&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
			if amount_msat / 1000 < real_dust_limit_timeout_sat {
		_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
		// ack we're guaranteed to never include them in commitment txs anymore.
	let num_htlcs = included_htlcs + addl_htlcs;
	let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
	// Test/fuzz-only bookkeeping: cache the inputs of this estimate (minus the fee-spike
	// buffer HTLC) so tests can cross-check the next commitment's actual fee against it.
	#[cfg(any(test, fuzzing))]
		if fee_spike_buffer_htlc.is_some() {
			fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
		let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
			+ context.holding_cell_htlc_updates.len();
		let commitment_tx_info = CommitmentTxInfoCached {
			total_pending_htlcs,
			next_holder_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
				HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
			next_counterparty_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
				HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
			feerate: context.feerate_per_kw,
		*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
/// pending HTLCs that are on track to be in their next commitment tx
/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
/// `fee_spike_buffer_htlc` is `Some`.
/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
/// Dust HTLCs are excluded.
fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
	let context = &self;
	// The remote only pays commitment-tx fees when they funded the channel, i.e. we are inbound.
	assert!(!context.is_outbound());
	// NOTE(review): the anchor-channel arm of this `if` is elided — presumably `(0, 0)`;
	// confirm.
	let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
		(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
			context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
	// Dust limits on the COUNTERPARTY's commitment transaction this time.
	let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
	let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
	let mut addl_htlcs = 0;
	if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
		// (elided: `match htlc.origin {`) — on their tx, HTLCs we offer are claimed via
		// HTLC-success; HTLCs they offer via HTLC-timeout.
		HTLCInitiator::LocalOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
		HTLCInitiator::RemoteOffered => {
			if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
	// When calculating the set of HTLCs which will be included in their next commitment_signed, all
	// non-dust inbound HTLCs are included (as all states imply it will be included) and only
	// committed outbound HTLCs, see below.
	let mut included_htlcs = 0;
	for ref htlc in context.pending_inbound_htlcs.iter() {
		// NOTE(review): `<=` here (and below) where the local variant uses `<` — is the
		// boundary difference intentional? Confirm against the full source/history.
		if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
		included_htlcs += 1;
	for ref htlc in context.pending_outbound_htlcs.iter() {
		if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
		// We only include outbound HTLCs if it will not be included in their next commitment_signed,
		// i.e. if they've responded to us with an RAA after announcement.
			OutboundHTLCState::Committed => included_htlcs += 1,
			OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
			OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
	let num_htlcs = included_htlcs + addl_htlcs;
	let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
	// Test/fuzz-only bookkeeping: cache the inputs of this estimate (minus the fee-spike
	// buffer HTLC) so tests can cross-check the remote commitment's actual fee against it.
	#[cfg(any(test, fuzzing))]
		if fee_spike_buffer_htlc.is_some() {
			fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
		let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
		let commitment_tx_info = CommitmentTxInfoCached {
			total_pending_htlcs,
			next_holder_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
				HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
			next_counterparty_htlc_id: match htlc.origin {
				HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
				HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
			feerate: context.feerate_per_kw,
		*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
/// Runs `f` while the funding transaction has not yet been broadcast.
/// NOTE(review): the tail of this function (the taken branch and the fallthrough, presumably
/// `f()` / `None`) is elided from this excerpt — confirm against the full source.
fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
	where F: Fn() -> Option<O> {
	// The funding tx is still unbroadcast while we are in FundingCreated, or while this
	// channel is part of a funding batch that is not yet fully signed (WaitingForBatch).
	if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
		self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2094 /// Returns the transaction if there is a pending funding transaction that is yet to be
2096 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2097 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2100 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2102 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2103 self.if_unbroadcasted_funding(||
2104 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2108 /// Returns whether the channel is funded in a batch.
2109 pub fn is_batch_funding(&self) -> bool {
2110 self.is_batch_funding.is_some()
2113 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2115 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2116 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
/// Gets the latest commitment transaction and any dependent transactions for relay (forcing
/// shutdown of this channel - no more calls into this Channel may be made afterwards except
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
	// Note that we MUST only generate a monitor update that indicates force-closure - we're
	// called during initialization prior to the chain_monitor in the encompassing ChannelManager
	// being fully configured in some cases. Thus, its likely any monitor events we generate will
	// be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
	assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
	// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
	// return them to fail the payment.
	let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
	let counterparty_node_id = self.get_counterparty_node_id();
	for htlc_update in self.holding_cell_htlc_updates.drain(..) {
		// Queued-but-uncommitted outbound adds are collected so the payment can be failed
		// backwards immediately.
		HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
			dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
	let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
		// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
		// returning a channel monitor update here would imply a channel monitor update before
		// we even registered the channel monitor to begin with, which is invalid.
		// Thus, if we aren't actually at a point where we could conceivably broadcast the
		// funding transaction, don't return a funding txo (which prevents providing the
		// monitor update to the user, even if we return one).
		// See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
		if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
			// Mark the channel as permanently closed in the monitor via the sentinel update id.
			self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
			Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
				update_id: self.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
	// Snapshot the batch funding txid before flipping the channel state below.
	let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
	self.channel_state = ChannelState::ShutdownComplete as u32;
	self.update_time_counter += 1;
	dropped_outbound_htlcs,
	unbroadcasted_batch_funding_txid,
/// Only allowed after [`Self::channel_transaction_parameters`] is set.
///
/// Returns `None` when the signer could not (yet) produce the counterparty commitment
/// signature, in which case `signer_pending_funding` remains set so the caller can retry.
fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
	let counterparty_keys = self.build_remote_transaction_keys();
	// Build the counterparty's initial commitment transaction; our signature on it is the
	// payload of the funding_created message.
	let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
	let signature = match &self.holder_signer {
		// TODO (taproot|arik): move match into calling method for Taproot
		ChannelSignerType::Ecdsa(ecdsa) => {
			// `.ok()?` bails out with None when the signature is not available.
			ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
				.map(|(sig, _)| sig).ok()?
	// TODO (taproot|arik)
	if self.signer_pending_funding {
		log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
		self.signer_pending_funding = false;
	Some(msgs::FundingCreated {
		temporary_channel_id: self.temporary_channel_id.unwrap(),
		funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
		funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
		partial_signature_with_nonce: None,
		next_local_nonce: None,
/// Only allowed after [`Self::channel_transaction_parameters`] is set.
///
/// Returns the counterparty's initial commitment transaction and, when the signer produced a
/// signature, the funding_signed message to send; otherwise `signer_pending_funding` is set.
fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
	let counterparty_keys = self.build_remote_transaction_keys();
	// NOTE(review): the `+ 1` suggests the counterparty commitment number has already been
	// advanced past the initial commitment by this point — confirm against the
	// funding_created handling.
	let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
	let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
	let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
	log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
		&self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
	match &self.holder_signer {
		// TODO (arik): move match into calling method for Taproot
		ChannelSignerType::Ecdsa(ecdsa) => {
			let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
				.map(|(signature, _)| msgs::FundingSigned {
					channel_id: self.channel_id(),
					partial_signature_with_nonce: None,
			// Track whether we still owe the peer a funding_signed once the signer comes back.
			if funding_signed.is_none() {
				log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
				self.signer_pending_funding = true;
			} else if self.signer_pending_funding {
				log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
				self.signer_pending_funding = false;
			// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
			(counterparty_initial_commitment_tx, funding_signed)
	// TODO (taproot|arik)
2242 // Internal utility functions for channels
/// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
/// `channel_value_satoshis` in msat, set through
/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
///
/// The effective percentage is lower bounded by 1% and upper bounded by 100%.
///
/// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
	// Clamp the configured percentage into the effective range.
	// NOTE(review): the clamp branch values are not visible here; per the doc above they
	// should be 1 and 100 respectively — confirm.
	let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
	} else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
		config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
	// sats -> msats is * 1000 and percent -> fraction is / 100, combined: * 10.
	channel_value_satoshis * 10 * configured_percent
2262 /// Returns a minimum channel reserve value the remote needs to maintain,
2263 /// required by us according to the configured or default
2264 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2266 /// Guaranteed to return a value no larger than channel_value_satoshis
2268 /// This is used both for outbound and inbound channels and has lower bound
2269 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2270 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2271 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2272 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// Historical default: 1% of the channel value, floored at 1000 sats, but never more than
	// the channel value itself.
	let one_percent = channel_value_satoshis / 100;
	channel_value_satoshis.min(one_percent.max(1000))
}
2284 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2285 // Note that num_htlcs should not include dust HTLCs.
2287 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2288 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2291 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2292 // Note that num_htlcs should not include dust HTLCs.
2293 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2294 // Note that we need to divide before multiplying to round properly,
2295 // since the lowest denomination of bitcoin on-chain is the satoshi.
2296 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
// Holder designates channel data owned for the benefit of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
	// All channel state and configuration lives in the shared `ChannelContext`.
	pub context: ChannelContext<SP>,
// Cached commitment-transaction fee info, populated (under test/fuzzing only) when computing
// commitment tx fees so the cache can be validated against the channel state it was built for
// (see the `next_remote_commitment_tx_fee_info_cached` population above).
#[cfg(any(test, fuzzing))]
struct CommitmentTxInfoCached {
	// Total pending inbound + outbound HTLC count at the time the fee was computed.
	total_pending_htlcs: usize,
	// Next holder HTLC id (incremented when the HTLC being considered was locally offered).
	next_holder_htlc_id: u64,
	// Next counterparty HTLC id (incremented when the HTLC was remotely offered).
	next_counterparty_htlc_id: u64,
2314 impl<SP: Deref> Channel<SP> where
2315 SP::Target: SignerProvider,
2316 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
/// Validates a feerate proposed by the remote peer against the minimum our fee estimator will
/// accept for this channel type, closing the channel if the proposal is too low.
fn check_remote_fee<F: Deref, L: Deref>(
	channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
	feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
	// Anchor channels use a distinct (per the confirmation-target names, lower) floor than
	// non-anchor channels.
	let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
		ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
		ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
	let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
	if feerate_per_kw < lower_limit {
		// Below our floor, but if it is still an improvement over the channel's current
		// feerate we accept it rather than risk being unable to close.
		if let Some(cur_feerate) = cur_feerate_per_kw {
			if feerate_per_kw > cur_feerate {
				"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
				cur_feerate, feerate_per_kw);
		return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2344 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2345 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2346 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2347 // outside of those situations will fail.
2348 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
/// Estimates the weight of the cooperative closing transaction given the optional output
/// scripts: non-witness bytes count 4 weight units each, the witness spending the funding
/// output (two signatures plus the funding redeemscript) counts 1 per byte.
fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
	1 + // script length (0)
	)*4 + // * 4 for non-witness parts
	2 + // witness marker and flag
	1 + // witness element count
	4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
	self.context.get_funding_redeemscript().len() as u64 + // funding witness script
	2*(1 + 71); // two signatures + sighash type flags
	// Each output that is actually present adds its value, script-length prefix and
	// scriptpubkey, all non-witness data (hence the * 4).
	if let Some(spk) = a_scriptpubkey {
		ret += ((8+1) + // output values and script length
			spk.len() as u64) * 4; // scriptpubkey and witness multiplier
	if let Some(spk) = b_scriptpubkey {
		ret += ((8+1) + // output values and script length
			spk.len() as u64) * 4; // scriptpubkey and witness multiplier
/// Builds the cooperative-close transaction for the current channel balances, returning it
/// along with the total fee actually paid (which can exceed `proposed_total_fee_satoshis`
/// when the fee payer's output cannot cover the fee).
fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
	// Closing is only valid once every HTLC and pending fee update has been resolved.
	assert!(self.context.pending_inbound_htlcs.is_empty());
	assert!(self.context.pending_outbound_htlcs.is_empty());
	assert!(self.context.pending_update_fee.is_none());
	let mut total_fee_satoshis = proposed_total_fee_satoshis;
	// The funder (outbound side) pays the closing fee out of its own balance.
	let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
	let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
	// If the fee drove the payer's output negative, the shortfall is counted as extra fee
	// (the output will be pruned below via the dust checks).
	if value_to_holder < 0 {
		assert!(self.context.is_outbound());
		total_fee_satoshis += (-value_to_holder) as u64;
	} else if value_to_counterparty < 0 {
		assert!(!self.context.is_outbound());
		total_fee_satoshis += (-value_to_counterparty) as u64;
	// Prune outputs at or below our dust limit (or the remote output when asked to skip it).
	if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
		value_to_counterparty = 0;
	if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
		value_to_holder = 0;
	assert!(self.context.shutdown_scriptpubkey.is_some());
	let holder_shutdown_script = self.get_closing_scriptpubkey();
	let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
	let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
	let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
	(closing_transaction, total_fee_satoshis)
2413 fn funding_outpoint(&self) -> OutPoint {
2414 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
/// entirely.
///
/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
///
/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
/// disconnected).
pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
	(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
where L::Target: Logger {
	// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
	// (see equivalent if condition there).
	assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
	let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
	let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
	// Roll the update id back so dropping the monitor update leaves no gap in the sequence.
	self.context.latest_monitor_update_id = mon_update_id;
	if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
		assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
/// Marks the given inbound HTLC as fulfilled with the provided preimage, returning a monitor
/// update carrying the preimage plus, when one can be sent immediately, an
/// `update_fulfill_htlc` message; otherwise the claim is queued in the holding cell.
/// Duplicate claims are detected and reported as `DuplicateClaim`.
fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
	// Either ChannelReady got set (which means it won't be unset) or there is no way any
	// caller thought we could have something claimed (cause we wouldn't have accepted in an
	// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
	if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
		panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
	assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
	// ChannelManager may generate duplicate claims/fails due to HTLC update events from
	// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
	// these, but for now we just have to treat them as normal.
	// Locate the pending inbound HTLC being claimed; usize::MAX is the "not found" sentinel.
	let mut pending_idx = core::usize::MAX;
	let mut htlc_value_msat = 0;
	for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
		if htlc.htlc_id == htlc_id_arg {
			// The provided preimage must hash to the HTLC's payment hash.
			debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
			log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
				htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
			InboundHTLCState::Committed => {},
			InboundHTLCState::LocalRemoved(ref reason) => {
				// Already locally removed: a prior fulfill is a harmless duplicate; a prior
				// failure is a bug (we can never resolve an HTLC twice).
				if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
				log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
				debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
				return UpdateFulfillFetch::DuplicateClaim {};
			debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
			// Don't return in release mode here so that we can update channel_monitor
			htlc_value_msat = htlc.amount_msat;
	if pending_idx == core::usize::MAX {
		#[cfg(any(test, fuzzing))]
		// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
		// this is simply a duplicate claim, not previously failed and we lost funds.
		debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
		return UpdateFulfillFetch::DuplicateClaim {};
	// Now update local state:
	// We have to put the payment_preimage in the channel_monitor right away here to ensure we
	// can claim it even if the channel hits the chain before we see their next commitment.
	self.context.latest_monitor_update_id += 1;
	let monitor_update = ChannelMonitorUpdate {
		update_id: self.context.latest_monitor_update_id,
		updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
			payment_preimage: payment_preimage_arg.clone(),
	if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
		// Note that this condition is the same as the assertion in
		// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
		// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
		// do not not get into this branch.
		for pending_update in self.context.holding_cell_htlc_updates.iter() {
			match pending_update {
				&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
					if htlc_id_arg == htlc_id {
						// Make sure we don't leave latest_monitor_update_id incremented here:
						self.context.latest_monitor_update_id -= 1;
						#[cfg(any(test, fuzzing))]
						debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
						return UpdateFulfillFetch::DuplicateClaim {};
				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
					if htlc_id_arg == htlc_id {
						log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
						// TODO: We may actually be able to switch to a fulfill here, though its
						// rare enough it may not be worth the complexity burden.
						debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
		self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
		// msg: None signals that the claim was queued rather than immediately sendable.
		return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
	#[cfg(any(test, fuzzing))]
	self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
	let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
	if let InboundHTLCState::Committed = htlc.state {
		debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
		return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
	log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
	htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
	UpdateFulfillFetch::NewClaim {
		msg: Some(msgs::UpdateFulfillHTLC {
			channel_id: self.context.channel_id(),
			htlc_id: htlc_id_arg,
			payment_preimage: payment_preimage_arg,
/// Claims the given HTLC (see `get_update_fulfill_htlc`) and, when an immediate update
/// message was produced, also builds the accompanying commitment update, inserting the
/// preimage-bearing monitor update ahead of any blocked monitor updates.
pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
	// Commitment-state monitor updates may only be released when nothing is blocked.
	let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
	match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
		UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
			// Even if we aren't supposed to let new monitor updates with commitment state
			// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
			// matter what. Sadly, to push a new monitor update which flies before others
			// already queued, we have to insert it into the pending queue and update the
			// update_ids of all the following monitors.
			if release_cs_monitor && msg.is_some() {
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them
				// to be strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
				// Take the id of the first blocked update (if any) and shift every blocked
				// update's id up by one to make room for this update ahead of them.
				let new_mon_id = self.context.blocked_monitor_updates.get(0)
					.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
				monitor_update.update_id = new_mon_id;
				for held_update in self.context.blocked_monitor_updates.iter_mut() {
					held_update.update.update_id += 1;
				debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
				let update = self.build_commitment_no_status_check(logger);
				self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
			self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
			UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
		UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2599 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2600 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2601 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2602 /// before we fail backwards.
2604 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2605 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2606 /// [`ChannelError::Ignore`].
2607 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2608 -> Result<(), ChannelError> where L::Target: Logger {
2609 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2610 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
/// before we fail backwards.
///
/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
/// [`ChannelError::Ignore`].
fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
-> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
	if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
		panic!("Was asked to fail an HTLC when channel was not in an operational state");
	assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
	// ChannelManager may generate duplicate claims/fails due to HTLC update events from
	// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
	// these, but for now we just have to treat them as normal.
	// Locate the pending inbound HTLC being failed; usize::MAX is the "not found" sentinel.
	let mut pending_idx = core::usize::MAX;
	for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
		if htlc.htlc_id == htlc_id_arg {
			InboundHTLCState::Committed => {},
			InboundHTLCState::LocalRemoved(ref reason) => {
				// An HTLC we already fulfilled must never be failed afterwards.
				if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
				debug_assert!(false, "Tried to fail an HTLC that was already failed");
			debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
			return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
	if pending_idx == core::usize::MAX {
		#[cfg(any(test, fuzzing))]
		// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
		// is simply a duplicate fail, not previously failed and we failed-back too early.
		debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
	// While waiting on a revocation, disconnected, or with a monitor update in flight, the
	// failure must go through the holding cell.
	if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
		debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
		force_holding_cell = true;
	// Now update local state:
	if force_holding_cell {
		// Check the holding cell for an existing resolution of this HTLC first.
		for pending_update in self.context.holding_cell_htlc_updates.iter() {
			match pending_update {
				&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
					if htlc_id_arg == htlc_id {
						#[cfg(any(test, fuzzing))]
						debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
				&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
					if htlc_id_arg == htlc_id {
						debug_assert!(false, "Tried to fail an HTLC that was already failed");
						return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
		log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
		self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
			htlc_id: htlc_id_arg,
	log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
	// Mark the HTLC locally removed with a relayed failure and emit the message immediately.
	let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
	htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
	Ok(Some(msgs::UpdateFailHTLC {
		channel_id: self.context.channel_id(),
		htlc_id: htlc_id_arg,
2706 // Message handlers:
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
pub fn funding_signed<L: Deref>(
	&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
	// Sanity checks: we must be the funder, in FundingCreated (modulo an in-flight monitor
	// update), and must not have advanced any commitment numbers yet.
	if !self.context.is_outbound() {
		return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
	if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
		return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
	if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
		self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
		self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
		panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
	let funding_script = self.context.get_funding_redeemscript();
	let counterparty_keys = self.context.build_remote_transaction_keys();
	let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
	let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
	let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
	log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
		&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
	// Rebuild our own initial commitment transaction to verify the peer's signature on it.
	let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
	let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
	let trusted_tx = initial_commitment_tx.trust();
	let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
	let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
	// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
	if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
		return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
	let holder_commitment_tx = HolderCommitmentTransaction::new(
		initial_commitment_tx,
		&self.context.get_holder_pubkeys().funding_pubkey,
		self.context.counterparty_funding_pubkey()
	self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
		.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
	// Build the ChannelMonitor which will watch the chain for this channel from now on.
	let funding_redeemscript = self.context.get_funding_redeemscript();
	let funding_txo = self.context.get_funding_txo().unwrap();
	let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
	let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
	let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
	let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
	monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
	let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
		shutdown_script, self.context.get_holder_selected_contest_delay(),
		&self.context.destination_script, (funding_txo, funding_txo_script),
		&self.context.channel_transaction_parameters,
		funding_redeemscript.clone(), self.context.channel_value_satoshis,
		holder_commitment_tx, best_block, self.context.counterparty_node_id);
	channel_monitor.provide_initial_counterparty_commitment_tx(
		counterparty_initial_bitcoin_tx.txid, Vec::new(),
		self.context.cur_counterparty_commitment_transaction_number,
		self.context.counterparty_cur_commitment_point.unwrap(),
		counterparty_initial_commitment_tx.feerate_per_kw(),
		counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
		counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
	assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
	// Batch-funded channels additionally wait for the rest of the batch before broadcasting.
	if self.context.is_batch_funding() {
		self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
		self.context.channel_state = ChannelState::FundingSent as u32;
	self.context.cur_holder_commitment_transaction_number -= 1;
	self.context.cur_counterparty_commitment_transaction_number -= 1;
	log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
	let need_channel_ready = self.check_get_channel_ready(0).is_some();
	self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2800 /// Updates the state of the channel to indicate that all channels in the batch have received
2801 /// funding_signed and persisted their monitors.
2802 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2803 /// treated as a non-batch channel going forward.
2804 pub fn set_batch_ready(&mut self) {
// Forget the batch association entirely...
2805 self.context.is_batch_funding = None;
// ...and clear the WaitingForBatch bit so state checks treat this like a normal channel.
2806 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2809 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2810 /// and the channel is now usable (and public), this may generate an announcement_signatures to
/// send.
2812 pub fn channel_ready<NS: Deref, L: Deref>(
2813 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2814 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2815 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2817 NS::Target: NodeSigner,
// lnd (see issue 4006 below) sends channel_ready before channel_reestablish on reconnect;
// stash the message and ask for a reestablish instead of closing the channel.
2820 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2821 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2822 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2825 if let Some(scid_alias) = msg.short_channel_id_alias {
2826 if Some(scid_alias) != self.context.short_channel_id {
2827 // The scid alias provided can be used to route payments *from* our counterparty,
2828 // i.e. can be used for inbound payments and provided in invoices, but is not used
2829 // when routing outbound payments.
2830 self.context.latest_inbound_scid_alias = Some(scid_alias);
2834 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2836 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2837 // batch, but we can receive channel_ready messages.
2839 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2840 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
// State transition: record that the peer is ready; if we were also ready, the channel
// becomes fully ChannelReady.
2842 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2843 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2844 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2845 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2846 self.context.update_time_counter += 1;
2847 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2848 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2849 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2850 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2852 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2853 // required, or they're sending a fresh SCID alias.
// A re-sent channel_ready must carry the same first per-commitment point as before;
// which stored/derived point that is depends on how far the commitment number advanced.
2854 let expected_point =
2855 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2856 // If they haven't ever sent an updated point, the point they send should match
2858 self.context.counterparty_cur_commitment_point
2859 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2860 // If we've advanced the commitment number once, the second commitment point is
2861 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2862 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2863 self.context.counterparty_prev_commitment_point
2865 // If they have sent updated points, channel_ready is always supposed to match
2866 // their "first" point, which we re-derive here.
2867 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2868 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2869 ).expect("We already advanced, so previous secret keys should have been validated already")))
2871 if expected_point != Some(msg.next_per_commitment_point) {
2872 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2876 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
// Rotate the counterparty's commitment points: the point they just provided becomes
// current, the old current becomes previous.
2879 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2880 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2882 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2884 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
/// Handles an inbound `update_add_htlc` from our counterparty, validating it against channel
/// state, our limits (min/max HTLC, max in-flight, dust exposure), and both sides' reserve
/// requirements before queueing it in `pending_inbound_htlcs`. Violations that only affect
/// forwarding are converted into a failing `pending_forward_status` via
/// `create_pending_htlc_status`; protocol violations return `ChannelError::Close`.
2887 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2888 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2889 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2890 ) -> Result<(), ChannelError>
2891 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2892 FE::Target: FeeEstimator, L::Target: Logger,
2894 // We can't accept HTLCs sent after we've sent a shutdown.
2895 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2896 if local_sent_shutdown {
// 0x4000|8 = permanent_channel_failure per BOLT 4 failure codes.
2897 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2899 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2900 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2901 if remote_sent_shutdown {
2902 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2904 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2905 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
// Basic amount sanity checks: non-zero, no more than the full channel value, and at
// least our advertised htlc_minimum_msat.
2907 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2908 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2910 if msg.amount_msat == 0 {
2911 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2913 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2914 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce our advertised limits on HTLC count and total in-flight value.
2917 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2918 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2919 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2920 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2922 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2923 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2926 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2927 // the reserve_satoshis we told them to always have as direct payment so that they lose
2928 // something if we punish them for broadcasting an old state).
2929 // Note that we don't really care about having a small/no to_remote output in our local
2930 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2931 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2932 // present in the next commitment transaction we send them (at least for fulfilled ones,
2933 // failed ones won't modify value_to_self).
2934 // Note that we will send HTLCs which another instance of rust-lightning would think
2935 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2936 // Channel state once they will not be present in the next received commitment
2938 let mut removed_outbound_total_msat = 0;
2939 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2940 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2941 removed_outbound_total_msat += htlc.amount_msat;
2942 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2943 removed_outbound_total_msat += htlc.amount_msat;
// Dust-exposure checks: HTLCs below the dust limit (plus the fee to claim them, for
// non-anchor channels) are unclaimable on-chain, so we cap the total value at risk.
2947 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2948 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2951 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2952 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2953 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2955 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2956 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2957 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2958 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2959 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2960 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
// 0x1000|7 = temporary_channel_failure per BOLT 4 failure codes.
2961 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2965 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2966 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2967 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2968 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2969 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2970 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2971 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Ensure the remote actually has the funds for this HTLC at all...
2975 let pending_value_to_self_msat =
2976 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2977 let pending_remote_value_msat =
2978 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2979 if pending_remote_value_msat < msg.amount_msat {
2980 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2983 // Check that the remote can afford to pay for this HTLC on-chain at the current
2984 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2986 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2987 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2988 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
// On anchor channels the funder also carries the two anchor outputs' value.
2990 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2991 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2995 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2996 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2998 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
2999 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3003 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3004 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3008 if !self.context.is_outbound() {
3009 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3010 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3011 // side, only on the sender's. Note that with anchor outputs we are no longer as
3012 // sensitive to fee spikes, so we need to account for them.
3013 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3014 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3015 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3016 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3018 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3019 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3020 // the HTLC, i.e. its status is already set to failing.
3021 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3022 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3025 // Check that they won't violate our local required channel reserve by adding this HTLC.
3026 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3027 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3028 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3029 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC ids must be issued strictly sequentially per BOLT 2.
3032 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3033 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
// cltv_expiry is a block height; values >= 500,000,000 are timestamps in Bitcoin locktime
// semantics and are rejected.
3035 if msg.cltv_expiry >= 500000000 {
3036 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3039 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3040 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3041 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3045 // Now update local state:
3046 self.context.next_counterparty_htlc_id += 1;
3047 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3048 htlc_id: msg.htlc_id,
3049 amount_msat: msg.amount_msat,
3050 payment_hash: msg.payment_hash,
3051 cltv_expiry: msg.cltv_expiry,
3052 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3057 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
/// for, transitioning its state and returning a reference to it. Exactly one of
/// `check_preimage` (fulfill) or `fail_reason` (fail/malformed) must be provided;
/// on fulfill the preimage is verified against the HTLC's payment hash.
3059 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3060 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3061 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3062 if htlc.htlc_id == htlc_id {
3063 let outcome = match check_preimage {
3064 None => fail_reason.into(),
3065 Some(payment_preimage) => {
// SHA256(preimage) must equal the payment hash, else the claim is bogus.
3066 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3067 if payment_hash != htlc.payment_hash {
3068 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3070 OutboundHTLCOutcome::Success(Some(payment_preimage))
// Only a Committed HTLC may be removed; anything earlier or later in its
// lifecycle indicates a protocol violation by the peer.
3074 OutboundHTLCState::LocalAnnounced(_) =>
3075 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3076 OutboundHTLCState::Committed => {
3077 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3079 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3080 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
// No HTLC with the given id was found in pending_outbound_htlcs.
3085 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
/// Handles an inbound `update_fulfill_htlc`, returning the fulfilled HTLC's source and
/// amount (in msat) so the preimage can be propagated upstream.
3088 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3089 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3090 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3092 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3093 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
// Preimage validity is checked inside mark_outbound_htlc_removed.
3096 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
/// Handles an inbound `update_fail_htlc`, recording the failure reason on the
/// corresponding outbound HTLC.
3099 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3100 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3101 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3103 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3104 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3107 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `update_fail_malformed_htlc`, recording the failure reason on the
/// corresponding outbound HTLC.
3111 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3112 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3113 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3115 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3116 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3119 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
/// Handles an inbound `commitment_signed`: verifies the peer's signatures over our next
/// commitment transaction and each non-dust HTLC transaction, advances HTLC/fee-update
/// state, and returns the `ChannelMonitorUpdate` persisting the new holder commitment
/// (or queues it blocked if a prior monitor update is still in flight).
3123 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3124 where L::Target: Logger
3126 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3127 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3129 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3130 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3132 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3133 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3136 let funding_script = self.context.get_funding_redeemscript();
3138 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
// Build our next commitment tx locally and verify the peer's signature against it.
3140 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3141 let commitment_txid = {
3142 let trusted_tx = commitment_stats.tx.trust();
3143 let bitcoin_tx = trusted_tx.built_transaction();
3144 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3146 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3147 log_bytes!(msg.signature.serialize_compact()[..]),
3148 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3149 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3150 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3151 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3155 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3157 // If our counterparty updated the channel fee in this commitment transaction, check that
3158 // they can actually afford the new fee now.
3159 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3160 update_state == FeeUpdateState::RemoteAnnounced
// Only the funder (the remote, since we're not outbound here) pays the commitment fee.
3163 debug_assert!(!self.context.is_outbound());
3164 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3165 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3166 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
// Test/fuzz-only sanity check: the cached projected fee must match what we just built.
3169 #[cfg(any(test, fuzzing))]
3171 if self.context.is_outbound() {
3172 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3173 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3174 if let Some(info) = projected_commit_tx_info {
3175 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3176 + self.context.holding_cell_htlc_updates.len();
3177 if info.total_pending_htlcs == total_pending_htlcs
3178 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3179 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3180 && info.feerate == self.context.feerate_per_kw {
3181 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
// The peer must provide exactly one signature per non-dust HTLC output.
3187 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3188 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3191 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3192 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3193 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3194 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3195 // backwards compatibility, we never use it in production. To provide test coverage, here,
3196 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3197 #[allow(unused_assignments, unused_mut)]
3198 let mut separate_nondust_htlc_sources = false;
3199 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3200 use core::hash::{BuildHasher, Hasher};
3201 // Get a random value using the only std API to do so - the DefaultHasher
3202 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3203 separate_nondust_htlc_sources = rand_val % 2 == 0;
// Verify each non-dust HTLC's second-stage transaction signature.
3206 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3207 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3208 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
// An output index is only assigned to non-dust HTLCs; dust HTLCs have no tx to sign.
3209 if let Some(_) = htlc.transaction_output_index {
3210 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3211 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3212 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3214 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
// Anchor channels sign HTLC txs with SINGLE|ANYONECANPAY so fees can be attached later.
3215 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3216 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3217 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3218 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3219 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3220 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3221 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3223 if !separate_nondust_htlc_sources {
3224 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3227 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3229 if separate_nondust_htlc_sources {
3230 if let Some(source) = source_opt.take() {
3231 nondust_htlc_sources.push(source);
3234 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3237 let holder_commitment_tx = HolderCommitmentTransaction::new(
3238 commitment_stats.tx,
3240 msg.htlc_signatures.clone(),
3241 &self.context.get_holder_pubkeys().funding_pubkey,
3242 self.context.counterparty_funding_pubkey()
3245 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3246 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3248 // Update state now that we've passed all the can-fail calls...
3249 let mut need_commitment = false;
3250 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3251 if *update_state == FeeUpdateState::RemoteAnnounced {
3252 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3253 need_commitment = true;
// Advance inbound HTLCs the peer announced: they're now included in a signed commitment.
3257 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3258 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3259 Some(forward_info.clone())
3261 if let Some(forward_info) = new_forward {
3262 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3263 &htlc.payment_hash, &self.context.channel_id);
3264 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3265 need_commitment = true;
3268 let mut claimed_htlcs = Vec::new();
3269 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3270 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3271 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3272 &htlc.payment_hash, &self.context.channel_id);
3273 // Grab the preimage, if it exists, instead of cloning
3274 let mut reason = OutboundHTLCOutcome::Success(None);
3275 mem::swap(outcome, &mut reason);
3276 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3277 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3278 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3279 // have a `Success(None)` reason. In this case we could forget some HTLC
3280 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3281 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3283 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3285 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3286 need_commitment = true;
// Persist the new holder commitment (and HTLC data) via a monitor update.
3290 self.context.latest_monitor_update_id += 1;
3291 let mut monitor_update = ChannelMonitorUpdate {
3292 update_id: self.context.latest_monitor_update_id,
3293 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3294 commitment_tx: holder_commitment_tx,
3295 htlc_outputs: htlcs_and_sigs,
3297 nondust_htlc_sources,
3301 self.context.cur_holder_commitment_transaction_number -= 1;
3302 self.context.expecting_peer_commitment_signed = false;
3303 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3304 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3305 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3307 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3308 // In case we initially failed monitor updating without requiring a response, we need
3309 // to make sure the RAA gets sent first.
3310 self.context.monitor_pending_revoke_and_ack = true;
3311 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3312 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3313 // the corresponding HTLC status updates so that
3314 // get_last_commitment_update_for_send includes the right HTLCs.
3315 self.context.monitor_pending_commitment_signed = true;
3316 let mut additional_update = self.build_commitment_no_status_check(logger);
3317 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3318 // strictly increasing by one, so decrement it here.
3319 self.context.latest_monitor_update_id = monitor_update.update_id;
3320 monitor_update.updates.append(&mut additional_update.updates);
3322 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3323 &self.context.channel_id);
3324 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3327 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3328 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3329 // we'll send one right away when we get the revoke_and_ack when we
3330 // free_holding_cell_htlcs().
3331 let mut additional_update = self.build_commitment_no_status_check(logger);
3332 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3333 // strictly increasing by one, so decrement it here.
3334 self.context.latest_monitor_update_id = monitor_update.update_id;
3335 monitor_update.updates.append(&mut additional_update.updates);
3339 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3340 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3341 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3342 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3345 /// Public version of the below, checking relevant preconditions first.
3346 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3347 /// returns `(None, Vec::new())`.
3348 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3349 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3350 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3351 where F::Target: FeeEstimator, L::Target: Logger
3353 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3354 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3355 self.free_holding_cell_htlcs(fee_estimator, logger)
3356 } else { (None, Vec::new()) }
3359 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3360 /// for our counterparty.
/// Returns a single [`ChannelMonitorUpdate`] covering everything that was freed (or `None` if
/// the holding cell held nothing to send) plus `(source, payment_hash)` pairs for held HTLC
/// adds which could not be re-sent and must now be failed backwards by the caller.
3361 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3362 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3363 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3364 where F::Target: FeeEstimator, L::Target: Logger
// We must not be mid-monitor-update when regenerating commitment state.
3366 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3367 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3368 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3369 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3371 let mut monitor_update = ChannelMonitorUpdate {
3372 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3373 updates: Vec::new(),
// Take ownership of the queued updates, leaving the holding cell empty while we replay them.
3376 let mut htlc_updates = Vec::new();
3377 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3378 let mut update_add_count = 0;
3379 let mut update_fulfill_count = 0;
3380 let mut update_fail_count = 0;
3381 let mut htlcs_to_fail = Vec::new();
// Replay each held update. Adds may fail (and are then failed backwards); claims and fails
// must succeed, per the invariants documented on each arm below.
3382 for htlc_update in htlc_updates.drain(..) {
3383 // Note that this *can* fail, though it should be due to rather-rare conditions on
3384 // fee races with adding too many outputs which push our total payments just over
3385 // the limit. In case it's less rare than I anticipate, we may want to revisit
3386 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3387 // to rebalance channels.
3388 match &htlc_update {
3389 &HTLCUpdateAwaitingACK::AddHTLC {
3390 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3391 skimmed_fee_msat, blinding_point, ..
3393 match self.send_htlc(
3394 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3395 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3397 Ok(_) => update_add_count += 1,
3400 ChannelError::Ignore(ref msg) => {
3401 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3402 // If we fail to send here, then this HTLC should
3403 // be failed backwards. Failing to send here
3404 // indicates that this HTLC may keep being put back
3405 // into the holding cell without ever being
3406 // successfully forwarded/failed/fulfilled, causing
3407 // our counterparty to eventually close on us.
3408 htlcs_to_fail.push((source.clone(), *payment_hash));
3411 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3417 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3418 // If an HTLC claim was previously added to the holding cell (via
3419 // `get_update_fulfill_htlc`, then generating the claim message itself must
3420 // not fail - any in between attempts to claim the HTLC will have resulted
3421 // in it hitting the holding cell again and we cannot change the state of a
3422 // holding cell HTLC from fulfill to anything else.
3423 let mut additional_monitor_update =
3424 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3425 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3426 { monitor_update } else { unreachable!() };
3427 update_fulfill_count += 1;
3428 monitor_update.updates.append(&mut additional_monitor_update.updates);
3430 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3431 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3432 Ok(update_fail_msg_option) => {
3433 // If an HTLC failure was previously added to the holding cell (via
3434 // `queue_fail_htlc`) then generating the fail message itself must
3435 // not fail - we should never end up in a state where we double-fail
3436 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3437 // for a full revocation before failing.
3438 debug_assert!(update_fail_msg_option.is_some());
3439 update_fail_count += 1;
3442 if let ChannelError::Ignore(_) = e {}
3444 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
// If nothing was actually generated and no fee update is pending, there is no commitment or
// monitor update to produce — just hand back the HTLCs which must be failed.
3451 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3452 return (None, htlcs_to_fail);
// Any held fee update is sent now (force_holding_cell = false) rather than re-queued.
3454 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3455 self.send_update_fee(feerate, false, fee_estimator, logger)
3460 let mut additional_update = self.build_commitment_no_status_check(logger);
3461 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3462 // but we want them to be strictly increasing by one, so reset it here.
3463 self.context.latest_monitor_update_id = monitor_update.update_id;
3464 monitor_update.updates.append(&mut additional_update.updates);
3466 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3467 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3468 update_add_count, update_fulfill_count, update_fail_count);
3470 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3471 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3477 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3478 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3479 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3480 /// generating an appropriate error *after* the channel state has been updated based on the
3481 /// revoke_and_ack message.
/// If `hold_mon_update` is set (or earlier monitor updates are already blocked), the resulting
/// [`ChannelMonitorUpdate`] is queued in `blocked_monitor_updates` and `None` is returned in
/// its place; otherwise it is returned to the caller for immediate persistence.
3482 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3483 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3484 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3485 where F::Target: FeeEstimator, L::Target: Logger,
3487 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3488 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3490 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3491 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3493 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3494 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
// Check that the revealed per-commitment secret actually corresponds to the commitment
// point the counterparty previously gave us.
3497 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3499 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3500 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3501 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3505 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3506 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3507 // haven't given them a new commitment transaction to broadcast). We should probably
3508 // take advantage of this by updating our channel monitor, sending them an error, and
3509 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3510 // lot of work, and there's some chance this is all a misunderstanding anyway.
3511 // We have to do *something*, though, since our signer may get mad at us for otherwise
3512 // jumping a remote commitment number, so best to just force-close and move on.
3513 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3516 #[cfg(any(test, fuzzing))]
3518 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3519 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3522 match &self.context.holder_signer {
3523 ChannelSignerType::Ecdsa(ecdsa) => {
3524 ecdsa.validate_counterparty_revocation(
3525 self.context.cur_counterparty_commitment_transaction_number + 1,
3527 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3529 // TODO (taproot|arik)
// Store the newly-revealed secret and queue a monitor update recording it.
3534 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3535 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3536 self.context.latest_monitor_update_id += 1;
3537 let mut monitor_update = ChannelMonitorUpdate {
3538 update_id: self.context.latest_monitor_update_id,
3539 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3540 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3541 secret: msg.per_commitment_secret,
3545 // Update state now that we've passed all the can-fail calls...
3546 // (note that we may still fail to generate the new commitment_signed message, but that's
3547 // OK, we step the channel here and *then* if the new generation fails we can fail the
3548 // channel based on that, but stepping stuff here should be safe either way.
3549 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3550 self.context.sent_message_awaiting_response = None;
3551 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3552 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3553 self.context.cur_counterparty_commitment_transaction_number -= 1;
3555 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3556 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3559 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3560 let mut to_forward_infos = Vec::new();
3561 let mut revoked_htlcs = Vec::new();
3562 let mut finalized_claimed_htlcs = Vec::new();
3563 let mut update_fail_htlcs = Vec::new();
3564 let mut update_fail_malformed_htlcs = Vec::new();
3565 let mut require_commitment = false;
3566 let mut value_to_self_msat_diff: i64 = 0;
3569 // Take references explicitly so that we can hold multiple references to self.context.
3570 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3571 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3572 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3574 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
3575 pending_inbound_htlcs.retain(|htlc| {
3576 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3577 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3578 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3579 value_to_self_msat_diff += htlc.amount_msat as i64;
3581 *expecting_peer_commitment_signed = true;
3585 pending_outbound_htlcs.retain(|htlc| {
3586 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3587 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3588 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3589 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3591 finalized_claimed_htlcs.push(htlc.source.clone());
3592 // They fulfilled, so we sent them money
3593 value_to_self_msat_diff -= htlc.amount_msat as i64;
// Promote remaining inbound HTLCs one step through their state machine now that the
// counterparty has revoked.
3598 for htlc in pending_inbound_htlcs.iter_mut() {
3599 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3601 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3605 let mut state = InboundHTLCState::Committed;
3606 mem::swap(&mut state, &mut htlc.state);
3608 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3609 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3610 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3611 require_commitment = true;
3612 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3613 match forward_info {
3614 PendingHTLCStatus::Fail(fail_msg) => {
3615 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3616 require_commitment = true;
3618 HTLCFailureMsg::Relay(msg) => {
3619 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3620 update_fail_htlcs.push(msg)
3622 HTLCFailureMsg::Malformed(msg) => {
3623 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3624 update_fail_malformed_htlcs.push(msg)
3628 PendingHTLCStatus::Forward(forward_info) => {
3629 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3630 to_forward_infos.push((forward_info, htlc.htlc_id));
3631 htlc.state = InboundHTLCState::Committed;
3637 for htlc in pending_outbound_htlcs.iter_mut() {
3638 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3639 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3640 htlc.state = OutboundHTLCState::Committed;
3641 *expecting_peer_commitment_signed = true;
3643 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3644 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3645 // Grab the preimage, if it exists, instead of cloning
3646 let mut reason = OutboundHTLCOutcome::Success(None);
3647 mem::swap(outcome, &mut reason);
3648 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3649 require_commitment = true;
3653 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3655 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3656 match update_state {
3657 FeeUpdateState::Outbound => {
3658 debug_assert!(self.context.is_outbound());
3659 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3660 self.context.feerate_per_kw = feerate;
3661 self.context.pending_update_fee = None;
3662 self.context.expecting_peer_commitment_signed = true;
3664 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3665 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3666 debug_assert!(!self.context.is_outbound());
3667 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3668 require_commitment = true;
3669 self.context.feerate_per_kw = feerate;
3670 self.context.pending_update_fee = None;
// Decide whether the monitor update can be handed to the caller now, or must instead be
// queued behind previously-blocked updates / an explicit hold request.
3675 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3676 let release_state_str =
3677 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3678 macro_rules! return_with_htlcs_to_fail {
3679 ($htlcs_to_fail: expr) => {
3680 if !release_monitor {
3681 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3682 update: monitor_update,
3684 return Ok(($htlcs_to_fail, None));
3686 return Ok(($htlcs_to_fail, Some(monitor_update)));
3691 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3692 // We can't actually generate a new commitment transaction (incl by freeing holding
3693 // cells) while we can't update the monitor, so we just return what we have.
3694 if require_commitment {
3695 self.context.monitor_pending_commitment_signed = true;
3696 // When the monitor updating is restored we'll call
3697 // get_last_commitment_update_for_send(), which does not update state, but we're
3698 // definitely now awaiting a remote revoke before we can step forward any more, so
3700 let mut additional_update = self.build_commitment_no_status_check(logger);
3701 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3702 // strictly increasing by one, so decrement it here.
3703 self.context.latest_monitor_update_id = monitor_update.update_id;
3704 monitor_update.updates.append(&mut additional_update.updates);
3706 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3707 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3708 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3709 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3710 return_with_htlcs_to_fail!(Vec::new());
// Normal path: try to flush the holding cell now that the remote revocation arrived.
3713 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3714 (Some(mut additional_update), htlcs_to_fail) => {
3715 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3716 // strictly increasing by one, so decrement it here.
3717 self.context.latest_monitor_update_id = monitor_update.update_id;
3718 monitor_update.updates.append(&mut additional_update.updates);
3720 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3721 &self.context.channel_id(), release_state_str);
3723 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3724 return_with_htlcs_to_fail!(htlcs_to_fail);
3726 (None, htlcs_to_fail) => {
3727 if require_commitment {
3728 let mut additional_update = self.build_commitment_no_status_check(logger);
3730 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3731 // strictly increasing by one, so decrement it here.
3732 self.context.latest_monitor_update_id = monitor_update.update_id;
3733 monitor_update.updates.append(&mut additional_update.updates);
3735 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3736 &self.context.channel_id(),
3737 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3740 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3741 return_with_htlcs_to_fail!(htlcs_to_fail);
3743 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3744 &self.context.channel_id(), release_state_str);
3746 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3747 return_with_htlcs_to_fail!(htlcs_to_fail);
3753 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3754 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3755 /// commitment update.
3756 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3757 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3758 where F::Target: FeeEstimator, L::Target: Logger
3760 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3761 assert!(msg_opt.is_none(), "We forced holding cell?");
3764 /// Adds a pending update to this channel. See the doc for send_htlc for
3765 /// further details on the optionness of the return value.
3766 /// If our balance is too low to cover the cost of the next commitment transaction at the
3767 /// new feerate, the update is cancelled.
/// Returns the `update_fee` message to send, or `None` when the update was cancelled or
/// instead placed in the holding cell (e.g. while awaiting a remote revoke or a monitor
/// update).
3769 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3770 /// [`Channel`] if `force_holding_cell` is false.
3771 fn send_update_fee<F: Deref, L: Deref>(
3772 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3773 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3774 ) -> Option<msgs::UpdateFee>
3775 where F::Target: FeeEstimator, L::Target: Logger
// Only the channel funder may send update_fee; these are internal invariant violations,
// not peer-triggerable conditions, hence panics rather than errors.
3777 if !self.context.is_outbound() {
3778 panic!("Cannot send fee from inbound channel");
3780 if !self.context.is_usable() {
3781 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3783 if !self.context.is_live() {
3784 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3787 // Before proposing a feerate update, check that we can actually afford the new fee.
3788 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3789 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3790 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3791 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
// Budget for the commitment fee at the new rate, including holding-cell HTLCs and a buffer
// of CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLC slots.
3792 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3793 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3794 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3795 //TODO: auto-close after a number of failures?
3796 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3800 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3801 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3802 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3803 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3804 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3805 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3808 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3809 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// If we can't send the update right now (awaiting a remote revoke or mid-monitor-update),
// fall back to queueing it in the holding cell.
3813 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3814 force_holding_cell = true;
3817 if force_holding_cell {
3818 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3822 debug_assert!(self.context.pending_update_fee.is_none());
3823 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3825 Some(msgs::UpdateFee {
3826 channel_id: self.context.channel_id,
3831 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3832 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3834 /// No further message handling calls may be made until a channel_reestablish dance has
3836 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3837 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3838 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3839 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
// If we're already marked disconnected there is nothing to do.
3843 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3844 // While the below code should be idempotent, it's simpler to just return early, as
3845 // redundant disconnect events can fire, though they should be rare.
// Roll announcement signatures back so they are re-sent after reconnection.
3849 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3850 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3853 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3854 // will be retransmitted.
3855 self.context.last_sent_closing_fee = None;
3856 self.context.pending_counterparty_closing_signed = None;
3857 self.context.closing_fee_limits = None;
// Drop inbound HTLCs the counterparty announced but never committed to; they will re-send
// anything still relevant after reestablish.
3859 let mut inbound_drop_count = 0;
3860 self.context.pending_inbound_htlcs.retain(|htlc| {
3862 InboundHTLCState::RemoteAnnounced(_) => {
3863 // They sent us an update_add_htlc but we never got the commitment_signed.
3864 // We'll tell them what commitment_signed we're expecting next and they'll drop
3865 // this HTLC accordingly
3866 inbound_drop_count += 1;
3869 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3870 // We received a commitment_signed updating this HTLC and (at least hopefully)
3871 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3872 // in response to it yet, so don't touch it.
3875 InboundHTLCState::Committed => true,
3876 InboundHTLCState::LocalRemoved(_) => {
3877 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3878 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3879 // (that we missed). Keep this around for now and if they tell us they missed
3880 // the commitment_signed we can re-transmit the update then.
// Rewind the counterparty HTLC id counter past the dropped, never-committed HTLCs.
3885 self.context.next_counterparty_htlc_id -= inbound_drop_count;
3887 if let Some((_, update_state)) = self.context.pending_update_fee {
3888 if update_state == FeeUpdateState::RemoteAnnounced {
3889 debug_assert!(!self.context.is_outbound());
3890 self.context.pending_update_fee = None;
3894 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3895 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3896 // They sent us an update to remove this but haven't yet sent the corresponding
3897 // commitment_signed, we need to move it back to Committed and they can re-send
3898 // the update upon reconnection.
3899 htlc.state = OutboundHTLCState::Committed;
3903 self.context.sent_message_awaiting_response = None;
3905 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3906 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3910 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3911 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3912 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3913 /// update completes (potentially immediately).
3914 /// The messages which were generated with the monitor update must *not* have been sent to the
3915 /// remote end, and must instead have been dropped. They will be regenerated when
3916 /// [`Self::monitor_updating_restored`] is called.
3918 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3919 /// [`chain::Watch`]: crate::chain::Watch
3920 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3921 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3922 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3923 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3924 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3926 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3927 self.context.monitor_pending_commitment_signed |= resend_commitment;
3928 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3929 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3930 self.context.monitor_pending_failures.append(&mut pending_fails);
3931 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3932 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3935 /// Indicates that the latest ChannelMonitor update has been committed by the client
3936 /// successfully and we should restore normal operation. Returns messages which should be sent
3937 /// to the remote side.
3938 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3939 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3940 user_config: &UserConfig, best_block_height: u32
3941 ) -> MonitorRestoreUpdates
3944 NS::Target: NodeSigner
// Caller contract: this must only be called while a monitor update is actually pending;
// clear the in-progress flag now that persistence has completed.
3946 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3947 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3949 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3950 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3951 // first received the funding_signed.
3952 let mut funding_broadcastable =
3953 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3954 self.context.funding_transaction.take()
3956 // That said, if the funding transaction is already confirmed (ie we're active with a
3957 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3958 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3959 funding_broadcastable = None;
3962 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3963 // (and we assume the user never directly broadcasts the funding transaction and waits for
3964 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3965 // * an inbound channel that failed to persist the monitor on funding_created and we got
3966 // the funding transaction confirmed before the monitor was persisted, or
3967 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3968 let channel_ready = if self.context.monitor_pending_channel_ready {
3969 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3970 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3971 self.context.monitor_pending_channel_ready = false;
3972 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3973 Some(msgs::ChannelReady {
3974 channel_id: self.context.channel_id(),
3975 next_per_commitment_point,
3976 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3980 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
// Drain the queued HTLC resolutions (stashed by monitor_updating_paused) into the return
// value, leaving the context's queues empty.
3982 let mut accepted_htlcs = Vec::new();
3983 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3984 let mut failed_htlcs = Vec::new();
3985 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3986 let mut finalized_claimed_htlcs = Vec::new();
3987 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
// While the peer is disconnected we cannot resend RAA/commitment_signed directly; drop the
// pending-resend flags and return without them (channel_reestablish handling will work out
// what needs resending on reconnect).
3989 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3990 self.context.monitor_pending_revoke_and_ack = false;
3991 self.context.monitor_pending_commitment_signed = false;
3992 return MonitorRestoreUpdates {
3993 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3994 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3998 let raa = if self.context.monitor_pending_revoke_and_ack {
3999 Some(self.get_last_revoke_and_ack())
4001 let commitment_update = if self.context.monitor_pending_commitment_signed {
4002 self.get_last_commitment_update_for_send(logger).ok()
// If we (re-)send a commitment update we now expect a response from the peer.
4004 if commitment_update.is_some() {
4005 self.mark_awaiting_response();
4008 self.context.monitor_pending_revoke_and_ack = false;
4009 self.context.monitor_pending_commitment_signed = false;
// Preserve the original RAA-vs-commitment send order recorded when the messages were
// first generated.
4010 let order = self.context.resend_order.clone();
4011 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4012 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4013 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4014 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4015 MonitorRestoreUpdates {
4016 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
// Handles an inbound `update_fee` message. Only the channel funder may change the feerate,
// so on an outbound (we-funded) channel receiving one is a protocol violation.
4020 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4021 where F::Target: FeeEstimator, L::Target: Logger
4023 if self.context.is_outbound() {
4024 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4026 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4027 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
// Validate the proposed feerate is sane relative to our estimator before accepting it.
4029 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
// Record the fee update as announced-but-uncommitted; it takes effect through the normal
// commitment dance.
4031 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4032 self.context.update_time_counter += 1;
4033 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
// (Anchor channels use zero-fee HTLC transactions, so HTLC dust exposure does not scale
// with the commitment feerate and this check is skipped for them.)
4034 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4035 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4036 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4037 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4038 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4039 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4040 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4041 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4042 msg.feerate_per_kw, holder_tx_dust_exposure)));
4044 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4045 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4046 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4052 /// Indicates that the signer may have some signatures for us, so we should retry if we're
// blocked on any of the pending messages below. Returns whichever messages the signer was
// able to produce on this retry.
4055 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4056 let commitment_update = if self.context.signer_pending_commitment_update {
4057 self.get_last_commitment_update_for_send(logger).ok()
// funding_signed is only ever sent by the inbound (non-funding) side...
4059 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4060 self.context.get_funding_signed_msg(logger).1
// ...and once we have it we may be able to immediately produce channel_ready too.
4062 let channel_ready = if funding_signed.is_some() {
4063 self.check_get_channel_ready(0)
// funding_created is only ever sent by the outbound (funding) side.
4065 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4066 self.context.get_funding_created_msg(logger)
4069 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4070 if commitment_update.is_some() { "a" } else { "no" },
4071 if funding_signed.is_some() { "a" } else { "no" },
4072 if funding_created.is_some() { "a" } else { "no" },
4073 if channel_ready.is_some() { "a" } else { "no" });
4075 SignerResumeUpdates {
// Builds the RevokeAndACK message for our most recently revoked commitment, for (re)sending.
// Note: commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER, so
// `cur_holder_commitment_transaction_number + 2` refers to a commitment two steps *older*
// than the current one — the one whose secret we are releasing to the peer.
4083 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4084 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4085 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4086 msgs::RevokeAndACK {
4087 channel_id: self.context.channel_id,
4088 per_commitment_secret,
4089 next_per_commitment_point,
// Taproot-only field; not used on non-taproot channels.
4091 next_local_nonce: None,
4095 /// Gets the last commitment update for immediate sending to our peer.
// Regenerates the update_add/fulfill/fail/fee messages that made up our last commitment
// update from current channel state, then attempts to re-sign the commitment itself.
// Returns Err(()) (and sets signer_pending_commitment_update) if the signer is not ready.
4096 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4097 let mut update_add_htlcs = Vec::new();
4098 let mut update_fulfill_htlcs = Vec::new();
4099 let mut update_fail_htlcs = Vec::new();
4100 let mut update_fail_malformed_htlcs = Vec::new();
// Outbound HTLCs we announced but the peer never committed to must be re-added.
4102 for htlc in self.context.pending_outbound_htlcs.iter() {
4103 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4104 update_add_htlcs.push(msgs::UpdateAddHTLC {
4105 channel_id: self.context.channel_id(),
4106 htlc_id: htlc.htlc_id,
4107 amount_msat: htlc.amount_msat,
4108 payment_hash: htlc.payment_hash,
4109 cltv_expiry: htlc.cltv_expiry,
4110 onion_routing_packet: (**onion_packet).clone(),
4111 skimmed_fee_msat: htlc.skimmed_fee_msat,
4112 blinding_point: htlc.blinding_point,
// Inbound HTLCs we resolved locally (failed or fulfilled) but the peer never saw the
// removal for must be re-removed, matching the original removal reason.
4117 for htlc in self.context.pending_inbound_htlcs.iter() {
4118 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4120 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4121 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4122 channel_id: self.context.channel_id(),
4123 htlc_id: htlc.htlc_id,
4124 reason: err_packet.clone()
4127 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4128 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4129 channel_id: self.context.channel_id(),
4130 htlc_id: htlc.htlc_id,
4131 sha256_of_onion: sha256_of_onion.clone(),
4132 failure_code: failure_code.clone(),
4135 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4136 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4137 channel_id: self.context.channel_id(),
4138 htlc_id: htlc.htlc_id,
4139 payment_preimage: payment_preimage.clone(),
// Only the funder sends update_fee; re-send any fee update that is still pending.
4146 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4147 Some(msgs::UpdateFee {
4148 channel_id: self.context.channel_id(),
4149 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4153 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4154 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4155 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
// Try to (re-)sign the commitment. Track signer readiness via the
// signer_pending_commitment_update flag so signer_maybe_unblocked can retry later.
4156 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4157 if self.context.signer_pending_commitment_update {
4158 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4159 self.context.signer_pending_commitment_update = false;
4163 if !self.context.signer_pending_commitment_update {
4164 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4165 self.context.signer_pending_commitment_update = true;
4169 Ok(msgs::CommitmentUpdate {
4170 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4175 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
// Only re-sends `shutdown` if we had already sent one (LocalShutdownSent is set); in that
// case a shutdown scriptpubkey must already have been chosen.
4176 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4177 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4178 assert!(self.context.shutdown_scriptpubkey.is_some());
4179 Some(msgs::Shutdown {
4180 channel_id: self.context.channel_id,
4181 scriptpubkey: self.get_closing_scriptpubkey(),
4186 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4187 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4189 /// Some links printed in log lines are included here to check them during build (when run with
4190 /// `cargo doc --document-private-items`):
4191 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4192 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4193 pub fn channel_reestablish<L: Deref, NS: Deref>(
4194 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4195 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4196 ) -> Result<ReestablishResponses, ChannelError>
4199 NS::Target: NodeSigner
// channel_reestablish is only valid immediately after a reconnect, i.e. while we still
// have PeerDisconnected set.
4201 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4202 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4203 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4204 // just close here instead of trying to recover.
4205 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
// Sanity-check the peer's claimed commitment numbers before doing arithmetic with them.
4208 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4209 msg.next_local_commitment_number == 0 {
4210 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER; translate ours into the
// peer-facing (counting-up) form used by channel_reestablish.
4213 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
// If the peer claims to have revoked at least one of our commitments, verify the
// per-commitment secret they echo back matches the point we actually gave them.
4214 if msg.next_remote_commitment_number > 0 {
4215 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4216 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4217 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4218 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4219 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
// The peer has proven knowledge of a commitment beyond what we remember sending: our
// state is stale (lost ChannelMonitor/ChannelManager data). Broadcasting now would let
// the peer claim all our funds, so log loudly and panic rather than proceed.
4221 if msg.next_remote_commitment_number > our_commitment_transaction {
4222 macro_rules! log_and_panic {
4223 ($err_msg: expr) => {
4224 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4225 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4228 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4229 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4230 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4231 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4232 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4233 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4234 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4235 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4239 // Before we change the state of the channel, we check if the peer is sending a very old
4240 // commitment transaction number, if yes we send a warning message.
4241 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4242 return Err(ChannelError::Warn(format!(
4243 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4244 msg.next_remote_commitment_number,
4245 our_commitment_transaction
4249 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4250 // remaining cases either succeed or ErrorMessage-fail).
4251 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4252 self.context.sent_message_awaiting_response = None;
4254 let shutdown_msg = self.get_outbound_shutdown();
4256 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
// Pre-ChannelReady handling: decide whether (and what) to resend for the funding stage.
4258 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4259 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4260 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4261 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4262 if msg.next_remote_commitment_number != 0 {
4263 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4265 // Short circuit the whole handler as there is nothing we can resend them
4266 return Ok(ReestablishResponses {
4267 channel_ready: None,
4268 raa: None, commitment_update: None,
4269 order: RAACommitmentOrder::CommitmentFirst,
4270 shutdown_msg, announcement_sigs,
4274 // We have OurChannelReady set!
4275 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4276 return Ok(ReestablishResponses {
4277 channel_ready: Some(msgs::ChannelReady {
4278 channel_id: self.context.channel_id(),
4279 next_per_commitment_point,
4280 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4282 raa: None, commitment_update: None,
4283 order: RAACommitmentOrder::CommitmentFirst,
4284 shutdown_msg, announcement_sigs,
// Work out whether the peer missed our last revoke_and_ack; if a monitor update is in
// flight we record the need to resend instead of resending immediately.
4288 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4289 // Remote isn't waiting on any RevokeAndACK from us!
4290 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4292 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4293 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4294 self.context.monitor_pending_revoke_and_ack = true;
4297 Some(self.get_last_revoke_and_ack())
4300 debug_assert!(false, "All values should have been handled in the four cases above");
4301 return Err(ChannelError::Close(format!(
4302 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4303 msg.next_remote_commitment_number,
4304 our_commitment_transaction
4308 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4309 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4310 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4311 // the corresponding revoke_and_ack back yet.
4312 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4313 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4314 self.mark_awaiting_response();
4316 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
// If both sides are at commitment number 1 the peer may have missed our channel_ready;
// regenerate it for resend.
4318 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4319 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4320 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4321 Some(msgs::ChannelReady {
4322 channel_id: self.context.channel_id(),
4323 next_per_commitment_point,
4324 short_channel_id_alias: Some(self.context.outbound_scid_alias),
// Finally, compare the peer's view of *our* commitment stream to decide whether they also
// missed our last commitment_signed.
4328 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4329 if required_revoke.is_some() {
4330 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4332 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4335 Ok(ReestablishResponses {
4336 channel_ready, shutdown_msg, announcement_sigs,
4337 raa: required_revoke,
4338 commitment_update: None,
4339 order: self.context.resend_order.clone(),
4341 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4342 if required_revoke.is_some() {
4343 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4345 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
// Again, defer the resend if a monitor update is still in flight.
4348 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4349 self.context.monitor_pending_commitment_signed = true;
4350 Ok(ReestablishResponses {
4351 channel_ready, shutdown_msg, announcement_sigs,
4352 commitment_update: None, raa: None,
4353 order: self.context.resend_order.clone(),
4356 Ok(ReestablishResponses {
4357 channel_ready, shutdown_msg, announcement_sigs,
4358 raa: required_revoke,
4359 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4360 order: self.context.resend_order.clone(),
4363 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4364 Err(ChannelError::Close(format!(
4365 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4366 msg.next_local_commitment_number,
4367 next_counterparty_commitment_number,
4370 Err(ChannelError::Close(format!(
4371 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4372 msg.next_local_commitment_number,
4373 next_counterparty_commitment_number,
4378 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4379 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4380 /// at which point they will be recalculated.
4381 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4383 where F::Target: FeeEstimator
// Memoized: once computed, the cached limits are returned unchanged for the rest of the
// negotiation (cleared elsewhere on disconnect/restart).
4385 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4387 // Propose a range from our current Background feerate to our Normal feerate plus our
4388 // force_close_avoidance_max_fee_satoshis.
4389 // If we fail to come to consensus, we'll have to force-close.
4390 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4391 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4392 // that we don't expect to need fee bumping
4393 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
// Only the funder pays the closing fee, so as the non-funder we don't cap the feerate here.
4394 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4396 // The spec requires that (when the channel does not have anchors) we only send absolute
4397 // channel fees no greater than the absolute channel fee on the current commitment
4398 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4399 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4400 // some force-closure by old nodes, but we wanted to close the channel anyway.
4402 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4403 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4404 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4405 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4408 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4409 // below our dust limit, causing the output to disappear. We don't bother handling this
4410 // case, however, as this should only happen if a channel is closed before any (material)
4411 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4412 // come to consensus with our counterparty on appropriate fees, however it should be a
4413 // relatively rare case. We can revisit this later, though note that in order to determine
4414 // if the funders' output is dust we have to know the absolute fee we're going to use.
4415 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4416 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4417 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4418 // We always add force_close_avoidance_max_fee_satoshis to our normal
4419 // feerate-calculated fee, but allow the max to be overridden if we're using a
4420 // target feerate-calculated fee.
4421 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4422 proposed_max_feerate as u64 * tx_weight / 1000)
// As the non-funder, accept any total fee up to the channel value minus our own balance
// (our msat balance rounded up to a whole satoshi) — the fee comes out of the funder's side.
4424 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4427 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4428 self.context.closing_fee_limits.clone().unwrap()
4431 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4432 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4433 /// this point if we're the funder we should send the initial closing_signed, and in any case
4434 /// shutdown should complete within a reasonable timeframe.
// Thin wrapper: the actual readiness logic lives on the channel context.
4435 fn closing_negotiation_ready(&self) -> bool {
4436 self.context.closing_negotiation_ready()
4439 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4440 /// an Err if no progress is being made and the channel should be force-closed instead.
4441 /// Should be called on a one-minute timer.
4442 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4443 if self.closing_negotiation_ready() {
// closing_signed_in_flight was already set on a previous tick: two full ticks have
// elapsed with negotiation ready but unfinished, so give up and force-close.
4444 if self.context.closing_signed_in_flight {
4445 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
// First tick with negotiation ready: arm the flag and allow one more tick.
4447 self.context.closing_signed_in_flight = true;
// Attempts to kick off (or continue) closing_signed negotiation. Returns, in order, the
// closing_signed to send (if any), the fully-signed closing tx to broadcast (if negotiation
// completed), and the ShutdownResult (if the channel is now resolved).
4453 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4454 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4455 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4456 where F::Target: FeeEstimator, L::Target: Logger
4458 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4459 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4460 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4461 // that closing_negotiation_ready checks this case (as well as a few others).
4462 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4463 return Ok((None, None, None));
// As the non-funder we never open negotiation ourselves; instead, respond to any
// closing_signed the peer already sent us that we had stashed.
4466 if !self.context.is_outbound() {
4467 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4468 return self.closing_signed(fee_estimator, &msg);
4470 return Ok((None, None, None));
4473 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4474 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4475 if self.context.expecting_peer_commitment_signed {
4476 return Ok((None, None, None));
4479 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4481 assert!(self.context.shutdown_scriptpubkey.is_some());
// Open with our minimum acceptable fee; the advertised fee_range tells the peer how far
// we are willing to move.
4482 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4483 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4484 our_min_fee, our_max_fee, total_fee_satoshis);
4486 match &self.context.holder_signer {
4487 ChannelSignerType::Ecdsa(ecdsa) => {
4489 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4490 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
// Remember what we proposed so the early-return above prevents re-proposing.
4492 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4493 Ok((Some(msgs::ClosingSigned {
4494 channel_id: self.context.channel_id,
4495 fee_satoshis: total_fee_satoshis,
4497 fee_range: Some(msgs::ClosingSignedFeeRange {
4498 min_fee_satoshis: our_min_fee,
4499 max_fee_satoshis: our_max_fee,
4503 // TODO (taproot|arik)
4509 // Marks a channel as waiting for a response from the counterparty. If it's not received
4510 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
// Some(0) starts the tick counter; should_disconnect_peer_awaiting_response increments it.
4512 fn mark_awaiting_response(&mut self) {
4513 self.context.sent_message_awaiting_response = Some(0);
4516 /// Determines whether we should disconnect the counterparty due to not receiving a response
4517 /// within our expected timeframe.
4519 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4520 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4521 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4524 // Don't disconnect when we're not waiting on a response.
// Advance the per-tick counter; recommend disconnect once the threshold is reached.
4527 *ticks_elapsed += 1;
4528 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4532 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4533 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4535 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4536 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4538 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4539 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4540 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4541 // can do that via error message without getting a connection fail anyway...
4542 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4544 for htlc in self.context.pending_inbound_htlcs.iter() {
4545 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4546 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4549 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4551 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4552 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4555 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4556 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4557 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4560 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4563 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4564 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4565 // any further commitment updates after we set LocalShutdownSent.
4566 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4568 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4571 assert!(send_shutdown);
4572 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4573 Ok(scriptpubkey) => scriptpubkey,
4574 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4576 if !shutdown_scriptpubkey.is_compatible(their_features) {
4577 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4579 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4584 // From here on out, we may not fail!
4586 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4587 self.context.update_time_counter += 1;
4589 let monitor_update = if update_shutdown_script {
4590 self.context.latest_monitor_update_id += 1;
4591 let monitor_update = ChannelMonitorUpdate {
4592 update_id: self.context.latest_monitor_update_id,
4593 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4594 scriptpubkey: self.get_closing_scriptpubkey(),
4597 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4598 self.push_ret_blockable_mon_update(monitor_update)
4600 let shutdown = if send_shutdown {
4601 Some(msgs::Shutdown {
4602 channel_id: self.context.channel_id,
4603 scriptpubkey: self.get_closing_scriptpubkey(),
4607 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4608 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4609 // cell HTLCs and return them to fail the payment.
4610 self.context.holding_cell_update_fee = None;
4611 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4612 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4614 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4615 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4622 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4623 self.context.update_time_counter += 1;
4625 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
/// Assembles the fully-signed cooperative closing transaction by attaching the
/// 2-of-2 multisig witness (empty dummy element, both signatures in redeemscript
/// key order, then the funding redeemscript itself) to the built closing tx.
4628 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4629 let mut tx = closing_tx.trust().built_transaction().clone();
// OP_CHECKMULTISIG pops one extra element, so the first witness item is a dummy.
4631 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4633 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4634 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
// Append the SIGHASH_ALL type byte to each DER signature, as required in witness data.
4635 let mut holder_sig = sig.serialize_der().to_vec();
4636 holder_sig.push(EcdsaSighashType::All as u8);
4637 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4638 cp_sig.push(EcdsaSighashType::All as u8);
// Signatures must appear in the same order as the pubkeys in the funding
// redeemscript, which orders the two keys by serialized-byte comparison.
4639 if funding_key[..] < counterparty_funding_key[..] {
4640 tx.input[0].witness.push(holder_sig);
4641 tx.input[0].witness.push(cp_sig);
4643 tx.input[0].witness.push(cp_sig);
4644 tx.input[0].witness.push(holder_sig);
// Final witness element for a P2WSH spend is the witnessScript (funding redeemscript).
4647 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
/// Handles a counterparty `closing_signed` message: validates channel state and the
/// proposed fee, verifies their signature over the closing transaction, and either
/// completes the cooperative close (returning the fully-signed tx and a
/// [`ShutdownResult`]) or counter-proposes a fee via our own `closing_signed`.
4651 pub fn closing_signed<F: Deref>(
4652 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4653 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4654 where F::Target: FeeEstimator
// Fee negotiation may only begin once both sides have sent shutdown.
4656 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4657 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4659 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4660 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
// All HTLCs must be fully resolved before a closing tx can be negotiated.
4662 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4663 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4665 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4666 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
// As the channel funder, we are required to send the first fee proposal.
4669 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4670 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
// While a monitor update is in flight we can't act; stash the message and replay
// it once the monitor update completes.
4673 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4674 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4675 return Ok((None, None, None));
4678 let funding_redeemscript = self.context.get_funding_redeemscript();
4679 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4680 if used_total_fee != msg.fee_satoshis {
4681 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4683 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4685 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4688 // The remote end may have decided to revoke their output due to inconsistent dust
4689 // limits, so check for that case by re-checking the signature here.
4690 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4691 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4692 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
// Non-witness dust outputs would render the closing tx non-standard and unrelayable.
4696 for outp in closing_tx.trust().built_transaction().output.iter() {
4697 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4698 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4702 assert!(self.context.shutdown_scriptpubkey.is_some());
// If the peer echoed back the fee we last proposed, negotiation has converged:
// finish the close and return the broadcastable transaction.
4703 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4704 if last_fee == msg.fee_satoshis {
4705 let shutdown_result = ShutdownResult {
4706 monitor_update: None,
4707 dropped_outbound_htlcs: Vec::new(),
4708 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4710 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4711 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4712 self.context.update_time_counter += 1;
4713 return Ok((None, Some(tx), Some(shutdown_result)));
4717 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
// Signs and returns a counter-proposal (or final acceptance when the proposed fee
// matches the peer's). A macro because it returns from the enclosing function.
4719 macro_rules! propose_fee {
4720 ($new_fee: expr) => {
4721 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4722 (closing_tx, $new_fee)
4724 self.build_closing_transaction($new_fee, false)
4727 return match &self.context.holder_signer {
4728 ChannelSignerType::Ecdsa(ecdsa) => {
4730 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4731 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
// Matching fees means both sides agree: mark the channel closed and
// hand back the fully-signed tx alongside our final closing_signed.
4732 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4733 let shutdown_result = ShutdownResult {
4734 monitor_update: None,
4735 dropped_outbound_htlcs: Vec::new(),
4736 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4738 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4739 self.context.update_time_counter += 1;
4740 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4741 (Some(tx), Some(shutdown_result))
4746 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4747 Ok((Some(msgs::ClosingSigned {
4748 channel_id: self.context.channel_id,
4749 fee_satoshis: used_fee,
4751 fee_range: Some(msgs::ClosingSignedFeeRange {
4752 min_fee_satoshis: our_min_fee,
4753 max_fee_satoshis: our_max_fee,
4755 }), signed_tx, shutdown_result))
4757 // TODO (taproot|arik)
// Modern fee negotiation: the peer advertised an acceptable fee range.
4764 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4765 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4766 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4768 if max_fee_satoshis < our_min_fee {
4769 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4771 if min_fee_satoshis > our_max_fee {
4772 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4775 if !self.context.is_outbound() {
4776 // They have to pay, so pick the highest fee in the overlapping range.
4777 // We should never set an upper bound aside from their full balance
4778 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4779 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4781 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4782 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4783 msg.fee_satoshis, our_min_fee, our_max_fee)));
4785 // The proposed fee is in our acceptable range, accept it and broadcast!
4786 propose_fee!(msg.fee_satoshis);
4789 // Old fee style negotiation. We don't bother to enforce whether they are complying
4790 // with the "making progress" requirements, we just comply and hope for the best.
4791 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
// Peer moved upward from our last proposal: meet them, clamped to our max.
4792 if msg.fee_satoshis > last_fee {
4793 if msg.fee_satoshis < our_max_fee {
4794 propose_fee!(msg.fee_satoshis);
4795 } else if last_fee < our_max_fee {
4796 propose_fee!(our_max_fee);
4798 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
// Peer moved downward: meet them, clamped to our min.
4801 if msg.fee_satoshis > our_min_fee {
4802 propose_fee!(msg.fee_satoshis);
4803 } else if last_fee > our_min_fee {
4804 propose_fee!(our_min_fee);
4806 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
// No prior proposal from us: counter with their fee clamped into our range.
4810 if msg.fee_satoshis < our_min_fee {
4811 propose_fee!(our_min_fee);
4812 } else if msg.fee_satoshis > our_max_fee {
4813 propose_fee!(our_max_fee);
4815 propose_fee!(msg.fee_satoshis);
/// Checks whether a to-be-forwarded HTLC pays at least the fee and respects the
/// CLTV delta required by the given [`ChannelConfig`]. On failure returns the
/// BOLT 4 error string and failure code to relay back to the origin.
4821 fn internal_htlc_satisfies_config(
4822 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4823 ) -> Result<(), (&'static str, u16)> {
// Required fee = base + amt * proportional/1e6; checked arithmetic guards
// against overflow on attacker-controlled amounts.
4824 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4825 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4826 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4827 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4829 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4830 0x1000 | 12, // fee_insufficient
// The inbound expiry must leave us at least our configured cltv_expiry_delta
// of headroom over the outbound expiry.
4833 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4835 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4836 0x1000 | 13, // incorrect_cltv_expiry
4842 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4843 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4844 /// unsuccessful, falls back to the previous one if one exists.
4845 pub fn htlc_satisfies_config(
4846 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4847 ) -> Result<(), (&'static str, u16)> {
4848 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
// The previous config is retried so in-flight HTLCs routed under an older,
// not-yet-propagated channel_update are not spuriously failed.
4850 if let Some(prev_config) = self.context.prev_config() {
4851 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
/// Returns the current holder commitment transaction number. Commitment numbers
/// count *down* from INITIAL_COMMITMENT_NUMBER, hence the +1 over the stored value.
4858 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4859 self.context.cur_holder_commitment_transaction_number + 1
/// Returns the current counterparty commitment transaction number. If we are
/// awaiting their revoke_and_ack the counter was already advanced for the pending
/// commitment, so subtract one in that case.
4862 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4863 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
/// Returns the most recent counterparty commitment transaction number that has
/// been revoked (two behind the stored countdown value).
4866 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4867 self.context.cur_counterparty_commitment_transaction_number + 2
/// Returns a reference to the holder's channel signer.
4871 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4872 &self.context.holder_signer
/// Builds a snapshot of the channel's value-related state (balances, in-flight
/// HTLC amounts, limits), with satoshi-denominated fields scaled to msat.
4876 pub fn get_value_stat(&self) -> ChannelValueStat {
4878 value_to_self_msat: self.context.value_to_self_msat,
4879 channel_value_msat: self.context.channel_value_satoshis * 1000,
// NOTE(review): unwrap assumes the counterparty's channel reserve has been
// negotiated already — confirm callers only invoke this post-accept_channel.
4880 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4881 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4882 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum amounts of holding-cell HTLC additions not yet committed to the channel.
4883 holding_cell_outbound_amount_msat: {
4885 for h in self.context.holding_cell_htlc_updates.iter() {
4887 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4895 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4896 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4900 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4901 /// Allowed in any state (including after shutdown)
4902 pub fn is_awaiting_monitor_update(&self) -> bool {
// Simple flag test against the MonitorUpdateInProgress state bit.
4903 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4906 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4907 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4908 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
// Otherwise every update before the first blocked one has been released.
4909 self.context.blocked_monitor_updates[0].update.update_id - 1
4912 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4913 /// further blocked monitor update exists after the next.
4914 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4915 if self.context.blocked_monitor_updates.is_empty() { return None; }
// Pop from the front to preserve update ordering; report whether more remain.
4916 Some((self.context.blocked_monitor_updates.remove(0).update,
4917 !self.context.blocked_monitor_updates.is_empty()))
4920 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4921 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4922 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4923 -> Option<ChannelMonitorUpdate> {
// Monitor updates must be applied in order, so if anything is already blocked
// this update has to queue behind it rather than being released now.
4924 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4925 if !release_monitor {
4926 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
/// Returns the number of [`ChannelMonitorUpdate`]s currently blocked on this channel.
4935 pub fn blocked_monitor_updates_pending(&self) -> usize {
4936 self.context.blocked_monitor_updates.len()
4939 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4940 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4941 /// transaction. If the channel is inbound, this implies simply that the channel has not
4943 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4944 if !self.is_awaiting_monitor_update() { return false; }
// Normal (non-0conf) case: with transient flags masked out we are still exactly
// in FundingSent, i.e. waiting on the very first monitor persistence.
4945 if self.context.channel_state &
4946 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4947 == ChannelState::FundingSent as u32 {
4948 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4949 // FundingSent set, though our peer could have sent their channel_ready.
4950 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0)
4953 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4954 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4955 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4956 // waiting for the initial monitor persistence. Thus, we check if our commitment
4957 // transaction numbers have both been iterated only exactly once (for the
4958 // funding_signed), and we're awaiting monitor update.
4960 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4961 // only way to get an awaiting-monitor-update state during initial funding is if the
4962 // initial monitor persistence is still pending).
4964 // Because deciding we're awaiting initial broadcast spuriously could result in
4965 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4966 // we hard-assert here, even in production builds.
4967 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4968 assert!(self.context.monitor_pending_channel_ready);
4969 assert_eq!(self.context.latest_monitor_update_id, 0);
4975 /// Returns true if our channel_ready has been sent
4976 pub fn is_our_channel_ready(&self) -> bool {
// Either the OurChannelReady flag is set, or (flags masked off) we have already
// advanced to/past the ChannelReady state.
4977 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4980 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4981 pub fn received_shutdown(&self) -> bool {
// Set when we process the counterparty's shutdown message.
4982 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4985 /// Returns true if we either initiated or agreed to shut down the channel.
4986 pub fn sent_shutdown(&self) -> bool {
// Set once our own shutdown message has been queued/sent.
4987 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4990 /// Returns true if this channel is fully shut down. True here implies that no further actions
4991 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
4992 /// will be handled appropriately by the chain monitor.
4993 pub fn is_shutdown(&self) -> bool {
4994 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
// ShutdownComplete must be the *only* state bit set; anything else is a bug.
4995 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32)
/// Returns the current [`ChannelUpdateStatus`] (enabled/disabled gossip state).
5000 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5001 self.context.channel_update_status
/// Sets the channel's [`ChannelUpdateStatus`], bumping the update time counter so
/// that any subsequently-generated channel_update carries a fresh timestamp.
5004 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5005 self.context.update_time_counter += 1;
5006 self.context.channel_update_status = status;
/// Checks whether the funding transaction has reached sufficient depth at `height`
/// and, if so, advances the channel state and returns a `channel_ready` message to
/// send (or defers it behind a pending monitor update / peer disconnection).
5009 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5011 // * always when a new block/transactions are confirmed with the new height
5012 // * when funding is signed with a height of 0
// No confirmation seen yet and not a 0-conf channel: nothing to do.
5013 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5017 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
// Non-positive confirmations means a reorg dropped the funding tx; forget the height.
5018 if funding_tx_confirmations <= 0 {
5019 self.context.funding_tx_confirmation_height = 0;
5022 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5026 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5027 // channel_ready yet.
5028 if self.context.signer_pending_funding {
5032 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5033 // channel_ready until the entire batch is ready.
5034 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// State transitions: FundingSent -> +OurChannelReady, or (if they already sent
// theirs) straight to ChannelReady. Other states mean we've already acted.
5035 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5036 self.context.channel_state |= ChannelState::OurChannelReady as u32;
5038 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5039 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5040 self.context.update_time_counter += 1;
5042 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5043 // We got a reorg but not enough to trigger a force close, just ignore.
5046 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
5047 // We should never see a funding transaction on-chain until we've received
5048 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5049 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5050 // however, may do this and we shouldn't treat it as a bug.
5051 #[cfg(not(fuzzing))]
5052 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5053 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5054 self.context.channel_state);
5056 // We got a reorg but not enough to trigger a force close, just ignore.
5060 if need_commitment_update {
// Only send channel_ready now if no monitor update is in flight and the peer is
// connected; otherwise remember to send it when we can.
5061 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5062 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5063 let next_per_commitment_point =
5064 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5065 return Some(msgs::ChannelReady {
5066 channel_id: self.context.channel_id,
5067 next_per_commitment_point,
5068 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5072 self.context.monitor_pending_channel_ready = true;
5078 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5079 /// In the first case, we store the confirmation height and calculating the short channel id.
5080 /// In the second, we simply return an Err indicating we need to be force-closed now.
5081 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5082 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5083 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5084 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5086 NS::Target: NodeSigner,
5089 let mut msgs = (None, None);
5090 if let Some(funding_txo) = self.context.get_funding_txo() {
5091 for &(index_in_block, tx) in txdata.iter() {
5092 // Check if the transaction is the expected funding transaction, and if it is,
5093 // check that it pays the right amount to the right script.
5094 if self.context.funding_tx_confirmation_height == 0 {
5095 if tx.txid() == funding_txo.txid {
5096 let txo_idx = funding_txo.index as usize;
// Validate the funding output exists with the exact expected script and value.
5097 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5098 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5099 if self.context.is_outbound() {
5100 // If we generated the funding transaction and it doesn't match what it
5101 // should, the client is really broken and we should just panic and
5102 // tell them off. That said, because hash collisions happen with high
5103 // probability in fuzzing mode, if we're fuzzing we just close the
5104 // channel and move on.
5105 #[cfg(not(fuzzing))]
5106 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5108 self.context.update_time_counter += 1;
5109 let err_reason = "funding tx had wrong script/value or output index";
5110 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
// A funding tx with any non-witness input is malleable: its txid (and thus our
// signed commitment) could be invalidated by a third party.
5112 if self.context.is_outbound() {
5113 if !tx.is_coin_base() {
5114 for input in tx.input.iter() {
5115 if input.witness.is_empty() {
5116 // We generated a malleable funding transaction, implying we've
5117 // just exposed ourselves to funds loss to our counterparty.
5118 #[cfg(not(fuzzing))]
5119 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5124 self.context.funding_tx_confirmation_height = height;
5125 self.context.funding_tx_confirmed_in = Some(*block_hash);
5126 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5127 Ok(scid) => Some(scid),
5128 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5131 // If this is a coinbase transaction and not a 0-conf channel
5132 // we should update our min_depth to 100 to handle coinbase maturity
5133 if tx.is_coin_base() &&
5134 self.context.minimum_depth.unwrap_or(0) > 0 &&
5135 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5136 self.context.minimum_depth = Some(COINBASE_MATURITY);
5139 // If we allow 1-conf funding, we may need to check for channel_ready here and
5140 // send it immediately instead of waiting for a best_block_updated call (which
5141 // may have already happened for this block).
5142 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5143 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5144 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5145 msgs = (Some(channel_ready), announcement_sigs);
// Any confirmed tx spending the funding outpoint closes the channel on-chain.
5148 for inp in tx.input.iter() {
5149 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5150 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5151 return Err(ClosureReason::CommitmentTxConfirmed);
5159 /// When a new block is connected, we check the height of the block against outbound holding
5160 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5161 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5162 /// handled by the ChannelMonitor.
5164 /// If we return Err, the channel may have been closed, at which point the standard
5165 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5168 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5170 pub fn best_block_updated<NS: Deref, L: Deref>(
5171 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5172 node_signer: &NS, user_config: &UserConfig, logger: &L
5173 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5175 NS::Target: NodeSigner,
// Thin wrapper: supplies the signer/config so announcement sigs can be generated.
5178 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
/// Core best-block handler: times out stale holding-cell HTLCs, may produce a
/// `channel_ready`, and force-closes on funding-tx un-confirmation or funding
/// timeout. `chain_node_signer` is `None` when called for a simulated reorg
/// (see [`Self::funding_transaction_unconfirmed`]), suppressing announcement sigs.
5181 fn do_best_block_updated<NS: Deref, L: Deref>(
5182 &mut self, height: u32, highest_header_time: u32,
5183 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5184 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5186 NS::Target: NodeSigner,
5189 let mut timed_out_htlcs = Vec::new();
5190 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5191 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5193 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Drop holding-cell HTLC adds whose expiry is too close to be worth forwarding.
5194 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5196 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5197 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5198 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Keep the update-time monotonic against the best known header timestamp.
5206 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5208 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5209 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5210 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5212 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5213 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5216 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5217 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5218 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5219 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5220 if self.context.funding_tx_confirmation_height == 0 {
5221 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5222 // zero if it has been reorged out, however in either case, our state flags
5223 // indicate we've already sent a channel_ready
5224 funding_tx_confirmations = 0;
5227 // If we've sent channel_ready (or have both sent and received channel_ready), and
5228 // the funding transaction has become unconfirmed,
5229 // close the channel and hope we can get the latest state on chain (because presumably
5230 // the funding transaction is at least still in the mempool of most nodes).
5232 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5233 // 0-conf channel, but not doing so may lead to the
5234 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5236 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5237 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5238 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5239 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound channels give up if funding never confirms within the deadline.
5241 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5242 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5243 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5244 // If funding_tx_confirmed_in is unset, the channel must not be active
5245 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5246 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5247 return Err(ClosureReason::FundingTimedOut);
5250 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5251 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5253 Ok((None, timed_out_htlcs, announcement_sigs))
5256 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5257 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5258 /// before the channel has reached channel_ready and we can just wait for more blocks.
5259 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5260 if self.context.funding_tx_confirmation_height != 0 {
5261 // We handle the funding disconnection by calling best_block_updated with a height one
5262 // below where our funding was connected, implying a reorg back to conf_height - 1.
5263 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5264 // We use the time field to bump the current time we set on channel updates if its
5265 // larger. If we don't know that time has moved forward, we can just set it to the last
5266 // time we saw and it will be ignored.
5267 let best_time = self.context.update_time_counter;
// No signer is passed: a simulated reorg must not generate announcement sigs.
5268 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5269 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
// At conf_height - 1 the channel cannot have produced any of these; treat any
// of them appearing as an internal logic error.
5270 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5271 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5272 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5278 // We never learned about the funding confirmation anyway, just ignore
5283 // Methods to get unprompted messages to send to the remote end (or where we already returned
5284 // something in the handler for the message that prompted this message):
5286 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5287 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5288 /// directions). Should be used for both broadcasted announcements and in response to an
5289 /// AnnouncementSignatures message from the remote peer.
5291 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5294 /// This will only return ChannelError::Ignore upon failure.
5296 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5297 fn get_channel_announcement<NS: Deref>(
5298 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5299 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
// Guard: the channel must have been opened as a public (announced) channel.
5300 if !self.context.config.announced_channel {
5301 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5303 if !self.context.is_usable() {
5304 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
// A short_channel_id only exists once the funding tx is confirmed.
5307 let short_channel_id = self.context.get_short_channel_id()
5308 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5309 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5310 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5311 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// node_id_1 must be the lexicographically-lesser of the two node pubkeys; the same
// ordering then selects which funding pubkey goes in bitcoin_key_1/2 below.
5312 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5314 let msg = msgs::UnsignedChannelAnnouncement {
5315 features: channelmanager::provided_channel_features(&user_config),
5318 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5319 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5320 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5321 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5322 excess_data: Vec::new(),
/// Attempts to build an `announcement_signatures` message for this channel, returning `None`
/// (after logging) if we are not yet in a state where one may be sent. On success, moves
/// `announcement_sigs_state` to `MessageSent` so we don't send a duplicate.
5328 fn get_announcement_sigs<NS: Deref, L: Deref>(
5329 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5330 best_block_height: u32, logger: &L
5331 ) -> Option<msgs::AnnouncementSignatures>
5333 NS::Target: NodeSigner,
// Require the funding tx to be confirmed with sufficient depth (height + 5 <= best block,
// i.e. six confirmations) before announcing.
5336 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5340 if !self.context.is_usable() {
5344 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5345 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Only send once: bail if we've already sent (or committed) our signatures.
5349 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5353 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5354 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5357 log_trace!(logger, "{:?}", e);
5361 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5363 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5368 match &self.context.holder_signer {
5369 ChannelSignerType::Ecdsa(ecdsa) => {
5370 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5372 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5377 let short_channel_id = match self.context.get_short_channel_id() {
5379 None => return None,
// Record that our signatures have gone out before constructing the message.
5382 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5384 Some(msgs::AnnouncementSignatures {
5385 channel_id: self.context.channel_id(),
5387 node_signature: our_node_sig,
5388 bitcoin_signature: our_bitcoin_sig,
5391 // TODO (taproot|arik)
5397 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5399 fn sign_channel_announcement<NS: Deref>(
5400 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5401 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
// We can only produce a fully-signed announcement once the counterparty's signatures
// (received via announcement_signatures) have been stored.
5402 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5403 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5404 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
// Recover our position (node 1 vs node 2) from the already-ordered announcement so the
// four signatures below are placed in the matching slots.
5405 let were_node_one = announcement.node_id_1 == our_node_key;
5407 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5408 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5409 match &self.context.holder_signer {
5410 ChannelSignerType::Ecdsa(ecdsa) => {
5411 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5412 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5413 Ok(msgs::ChannelAnnouncement {
5414 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5415 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5416 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5417 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5418 contents: announcement,
5421 // TODO (taproot|arik)
5426 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5430 /// Processes an incoming announcement_signatures message, providing a fully-signed
5431 /// channel_announcement message which we can broadcast and storing our counterparty's
5432 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5433 pub fn announcement_signatures<NS: Deref>(
5434 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5435 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5436 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5437 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// The signed digest is the double-SHA256 of the serialized unsigned announcement.
5439 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify both of the counterparty's signatures (node key and funding key) before storing
// them; a bad signature is a protocol violation and closes the channel.
5441 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5442 return Err(ChannelError::Close(format!(
5443 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5444 &announcement, self.context.get_counterparty_node_id())));
5446 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5447 return Err(ChannelError::Close(format!(
5448 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5449 &announcement, self.context.counterparty_funding_pubkey())));
// Note the ordering: the (verified) signatures are stored *before* the depth check below,
// so a too-early announcement_signatures is still retained for use once we have six confs.
5452 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5453 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5454 return Err(ChannelError::Ignore(
5455 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5458 self.sign_channel_announcement(node_signer, announcement)
5461 /// Gets a signed channel_announcement for this channel, if we previously received an
5462 /// announcement_signatures from our counterparty.
5463 pub fn get_signed_channel_announcement<NS: Deref>(
5464 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5465 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Same six-confirmation depth requirement as when sending announcement_signatures.
5466 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5469 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5471 Err(_) => return None,
// Fails (returning None) if we never received the counterparty's announcement_signatures.
5473 match self.sign_channel_announcement(node_signer, announcement) {
5474 Ok(res) => Some(res),
5479 /// May panic if called on a channel that wasn't immediately-previously
5480 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5481 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
// Precondition checks: we must currently be disconnected from the peer, and must have
// advanced past the very first commitment.
5482 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5483 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5484 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5485 // current to_remote balances. However, it no longer has any use, and thus is now simply
5486 // set to a dummy (but valid, as required by the spec) public key.
5487 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5488 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5489 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5490 let mut pk = [2; 33]; pk[1] = 0xff;
5491 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// If the counterparty has revoked at least one commitment, include their last-revealed
// per-commitment secret as data-loss-protect evidence; otherwise (branch elided here) an
// all-zero/default value is presumably used — the else arm is not visible in this view.
5492 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5493 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5494 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5497 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
// Start the "awaiting response" timer so we can detect an unresponsive peer.
5500 self.mark_awaiting_response();
5501 msgs::ChannelReestablish {
5502 channel_id: self.context.channel_id(),
5503 // The protocol has two different commitment number concepts - the "commitment
5504 // transaction number", which starts from 0 and counts up, and the "revocation key
5505 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5506 // commitment transaction numbers by the index which will be used to reveal the
5507 // revocation key for that commitment transaction, which means we have to convert them
5508 // to protocol-level commitment numbers here...
5510 // next_local_commitment_number is the next commitment_signed number we expect to
5511 // receive (indicating if they need to resend one that we missed).
5512 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5513 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5514 // receive, however we track it by the next commitment number for a remote transaction
5515 // (which is one further, as they always revoke previous commitment transaction, not
5516 // the one we send) so we have to decrement by 1. Note that if
5517 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5518 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5520 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5521 your_last_per_commitment_secret: remote_last_secret,
5522 my_current_per_commitment_point: dummy_pubkey,
5523 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5524 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5525 // txid of that interactive transaction, else we MUST NOT set it.
5526 next_funding_txid: None,
5531 // Send stuff to our remote peers:
5533 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5534 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5535 /// commitment update.
5537 /// `Err`s will only be [`ChannelError::Ignore`].
5538 pub fn queue_add_htlc<F: Deref, L: Deref>(
5539 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5540 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5541 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5542 ) -> Result<(), ChannelError>
5543 where F::Target: FeeEstimator, L::Target: Logger
// Delegate to send_htlc with force_holding_cell=true, so no UpdateAddHTLC message can be
// produced — the HTLC always lands in the holding cell.
5546 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5547 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5548 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// Queueing can only fail with Ignore-class errors; anything else indicates a bug.
5550 if let ChannelError::Ignore(_) = err { /* fine */ }
5551 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5556 /// Adds a pending outbound HTLC to this channel, note that you probably want
5557 /// [`Self::send_htlc_and_commit`] instead because you'll want both messages at once.
5559 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5561 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5562 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5564 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5565 /// we may not yet have sent the previous commitment update messages and will need to
5566 /// regenerate them.
5568 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5569 /// on this [`Channel`] if `force_holding_cell` is false.
5571 /// `Err`s will only be [`ChannelError::Ignore`].
5572 fn send_htlc<F: Deref, L: Deref>(
5573 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5574 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5575 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5576 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5577 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5578 where F::Target: FeeEstimator, L::Target: Logger
// The channel must be fully ChannelReady with neither side having begun shutdown.
5580 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5581 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
// Sanity bounds on the amount: no more than the whole channel value, and non-zero.
5583 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5584 if amount_msat > channel_total_msat {
5585 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5588 if amount_msat == 0 {
5589 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Enforce the dynamic per-HTLC min/max derived from current balances and fees.
5592 let available_balances = self.context.get_available_balances(fee_estimator);
5593 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5594 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5595 available_balances.next_outbound_htlc_minimum_msat)));
5598 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5599 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5600 available_balances.next_outbound_htlc_limit_msat)));
5603 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5604 // Note that this should never really happen, if we're !is_live() on receipt of an
5605 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5606 // the user to send directly into a !is_live() channel. However, if we
5607 // disconnected during the time the previous hop was doing the commitment dance we may
5608 // end up getting here after the forwarding delay. In any case, returning an
5609 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5610 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// While awaiting an RAA or a monitor-update completion we must not send new updates, so
// the HTLC is diverted into the holding cell instead.
5613 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5614 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5615 payment_hash, amount_msat,
5616 if force_holding_cell { "into holding cell" }
5617 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5618 else { "to peer" });
5620 if need_holding_cell {
5621 force_holding_cell = true;
5624 // Now update local state:
5625 if force_holding_cell {
// Holding-cell path: record the pending add; no message is returned (caller gets Ok(None)).
5626 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5631 onion_routing_packet,
// Direct path: track the HTLC as LocalAnnounced and build the UpdateAddHTLC to send now.
5638 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5639 htlc_id: self.context.next_holder_htlc_id,
5641 payment_hash: payment_hash.clone(),
5643 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5649 let res = msgs::UpdateAddHTLC {
5650 channel_id: self.context.channel_id,
5651 htlc_id: self.context.next_holder_htlc_id,
5655 onion_routing_packet,
// Only incremented once we've definitely recorded the HTLC under the current id.
5659 self.context.next_holder_htlc_id += 1;
/// Advances HTLC/fee state for a newly-sent commitment_signed and builds the
/// [`ChannelMonitorUpdate`] carrying the new counterparty commitment transaction info.
/// Also flips the channel into AwaitingRemoteRevoke.
5664 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5665 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5666 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5667 // fail to generate this, we still are at least at a position where upgrading their status
// Inbound: AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke.
5669 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5670 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5671 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5673 if let Some(state) = new_state {
5674 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
// Outbound: AwaitingRemoteRevokeToRemove -> AwaitingRemovedRemoteRevoke, moving the
// outcome across without cloning via mem::swap.
5678 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5679 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5680 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5681 // Grab the preimage, if it exists, instead of cloning
5682 let mut reason = OutboundHTLCOutcome::Success(None);
5683 mem::swap(outcome, &mut reason);
5684 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// An inbound fee update waiting on this commitment becomes our new committed feerate.
5687 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5688 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5689 debug_assert!(!self.context.is_outbound());
5690 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5691 self.context.feerate_per_kw = feerate;
5692 self.context.pending_update_fee = None;
5695 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5697 let (mut htlcs_ref, counterparty_commitment_tx) =
5698 self.build_commitment_no_state_update(logger);
5699 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
// Box the HTLC sources so the monitor update owns its copies.
5700 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5701 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
// Our announcement_signatures, if sent, are committed as of this new state.
5703 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5704 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5707 self.context.latest_monitor_update_id += 1;
5708 let monitor_update = ChannelMonitorUpdate {
5709 update_id: self.context.latest_monitor_update_id,
5710 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5711 commitment_txid: counterparty_commitment_txid,
5712 htlc_outputs: htlcs.clone(),
5713 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5714 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5715 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5716 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5717 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5720 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
/// Builds the next counterparty commitment transaction (and its included HTLCs) without
/// mutating any channel state. In test/fuzzing builds, cross-checks the cached projected
/// commitment-tx fee against the actual fee of the transaction just built.
5724 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5725 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5726 where L::Target: Logger
5728 let counterparty_keys = self.context.build_remote_transaction_keys();
5729 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5730 let counterparty_commitment_tx = commitment_stats.tx;
5732 #[cfg(any(test, fuzzing))]
5734 if !self.context.is_outbound() {
// Consume the cached fee projection for the next remote commitment and clear the local
// one; if the channel state still matches the projection, the fee must match exactly.
5735 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5736 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5737 if let Some(info) = projected_commit_tx_info {
5738 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5739 if info.total_pending_htlcs == total_pending_htlcs
5740 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5741 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5742 && info.feerate == self.context.feerate_per_kw {
5743 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5744 assert_eq!(actual_fee, info.fee);
5750 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5753 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5754 /// generation when we shouldn't change HTLC/channel state.
5755 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5756 // Get the fee tests from `build_commitment_no_state_update`
5757 #[cfg(any(test, fuzzing))]
5758 self.build_commitment_no_state_update(logger);
5760 let counterparty_keys = self.context.build_remote_transaction_keys();
5761 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5762 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5764 match &self.context.holder_signer {
5765 ChannelSignerType::Ecdsa(ecdsa) => {
5766 let (signature, htlc_signatures);
5769 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5770 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Sign the counterparty commitment tx plus all included HTLC transactions in one
// signer call; res.0 is the commitment signature, res.1 the per-HTLC signatures.
5774 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5775 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5777 htlc_signatures = res.1;
5779 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5780 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5781 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5782 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// Trace-log each HTLC transaction/signature pair for debugging; htlc_signatures is
// produced in the same order as `htlcs`, so zip pairs them correctly.
5784 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5785 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5786 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5787 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5788 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5789 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5793 Ok((msgs::CommitmentSigned {
5794 channel_id: self.context.channel_id,
5798 partial_signature_with_nonce: None,
5799 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5801 // TODO (taproot|arik)
5807 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5808 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5810 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5811 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5812 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5813 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5814 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5815 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5816 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5817 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell=false and blinding_point=None on this path.
5819 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5820 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
5821 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
// Build the commitment update, pause monitor-driven message release, and hand the caller
// the (possibly blocked) monitor update.
5824 let monitor_update = self.build_commitment_no_status_check(logger);
5825 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5826 Ok(self.push_ret_blockable_mon_update(monitor_update))
5832 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5834 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
// Extract the counterparty's forwarding parameters from the update.
5835 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5836 fee_base_msat: msg.contents.fee_base_msat,
5837 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5838 cltv_expiry_delta: msg.contents.cltv_expiry_delta
// Compare before overwriting so we can report whether anything actually changed.
5840 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5842 self.context.counterparty_forwarding_info = new_forwarding_info;
5848 /// Begins the shutdown process, getting a message for the remote peer and returning all
5849 /// holding cell HTLCs for payment failure.
5851 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5852 /// [`ChannelMonitorUpdate`] will be returned).
5853 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5854 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5855 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
// Refuse to start shutdown while any outbound HTLC is still only locally announced (not
// yet committed by the counterparty) — the caller must process pending events first.
5857 for htlc in self.context.pending_outbound_htlcs.iter() {
5858 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5859 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
// Reject if shutdown is already in progress from either side.
5862 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5863 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5864 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5866 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5867 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5870 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5871 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5873 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5874 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5875 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5878 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5879 // script is set, we just force-close and call it a day.
5880 let mut chan_closed = false;
5881 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
// Resolve the shutdown script: keep an already-set one; otherwise use the caller's
// override or fall back to the signer-provided script (unless we're closing pre-funding).
5885 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5887 None if !chan_closed => {
5888 // use override shutdown script if provided
5889 let shutdown_scriptpubkey = match override_shutdown_script {
5890 Some(script) => script,
5892 // otherwise, use the shutdown scriptpubkey provided by the signer
5893 match signer_provider.get_shutdown_scriptpubkey() {
5894 Ok(scriptpubkey) => scriptpubkey,
5895 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
// The chosen script must be acceptable to the counterparty's advertised features.
5899 if !shutdown_scriptpubkey.is_compatible(their_features) {
5900 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5902 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5908 // From here on out, we may not fail!
5909 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
// Pre-funding: jump straight to ShutdownComplete and return a ShutdownResult; otherwise
// mark LocalShutdownSent and proceed with the cooperative-close flow.
5910 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5911 let shutdown_result = ShutdownResult {
5912 monitor_update: None,
5913 dropped_outbound_htlcs: Vec::new(),
5914 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5916 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5917 Some(shutdown_result)
5919 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5922 self.context.update_time_counter += 1;
// If we just picked a new shutdown script, persist it to the monitor so it survives.
5924 let monitor_update = if update_shutdown_script {
5925 self.context.latest_monitor_update_id += 1;
5926 let monitor_update = ChannelMonitorUpdate {
5927 update_id: self.context.latest_monitor_update_id,
5928 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5929 scriptpubkey: self.get_closing_scriptpubkey(),
5932 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5933 self.push_ret_blockable_mon_update(monitor_update)
5935 let shutdown = msgs::Shutdown {
5936 channel_id: self.context.channel_id,
5937 scriptpubkey: self.get_closing_scriptpubkey(),
5940 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5941 // our shutdown until we've committed all of the pending changes.
5942 self.context.holding_cell_update_fee = None;
5943 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5944 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5946 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5947 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5954 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5955 "we can't both complete shutdown and return a monitor update");
5957 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
/// Returns an iterator over the `HTLCSource`s and payment hashes of every outbound HTLC
/// this channel is currently responsible for: HTLCs still queued in the holding cell
/// (awaiting commitment) chained with the already-pending outbound HTLCs.
5960 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5961 self.context.holding_cell_htlc_updates.iter()
5962 .flat_map(|htlc_update| {
// Only `AddHTLC` holding-cell updates represent in-flight outbound HTLCs.
5964 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5965 => Some((source, payment_hash)),
5969 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5973 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5974 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Channel state shared with funded channels; promoted into a full `Channel` once
// funding negotiation completes (see `get_funding_created`).
5975 pub context: ChannelContext<SP>,
// State tracked only while the channel remains unfunded (e.g. its age in timer ticks).
5976 pub unfunded_context: UnfundedChannelContext,
5979 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
/// Creates a new outbound (we-are-initiator) V1 channel, validating the requested
/// parameters before anything is sent to the counterparty.
///
/// Returns an `APIError` (never panics) if:
/// * `channel_value_satoshis` exceeds what the peer supports (no-wumbo limit) or the
///   total bitcoin supply,
/// * `push_msat` exceeds the channel value,
/// * our configured `to_self_delay` is below `BREAKDOWN_TIMEOUT`,
/// * our selected channel reserve is below the implementation dust limit,
/// * our post-push balance cannot cover the initial commitment-transaction fee, or
/// * the signer fails to produce a shutdown/destination script.
///
/// On success the channel starts in `ChannelState::OurInitSent` with both commitment
/// numbers at `INITIAL_COMMITMENT_NUMBER`; counterparty-provided fields are left as
/// placeholders until `accept_channel`.
5980 pub fn new<ES: Deref, F: Deref>(
5981 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5982 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5983 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
5984 ) -> Result<OutboundV1Channel<SP>, APIError>
5985 where ES::Target: EntropySource,
5986 F::Target: FeeEstimator
// Derive our channel keys/signer up front; the `false` marks this channel as outbound
// for key-derivation purposes.
5988 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5989 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5990 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5991 let pubkeys = holder_signer.pubkeys().clone();
// Sanity-check the requested funding and push amounts.
5993 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5994 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5996 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5997 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
5999 let channel_value_msat = channel_value_satoshis * 1000;
6000 if push_msat > channel_value_msat {
6001 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6003 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6004 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
6006 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6007 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6008 // Protocol level safety check in place, although it should never happen because
6009 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6010 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6013 let channel_type = Self::get_initial_channel_type(&config, their_features);
6014 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
// Anchor channels must additionally fund two anchor outputs on top of the commitment
// fee, and use a different fee-estimation confirmation target.
6016 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6017 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6019 (ConfirmationTarget::NonAnchorChannelFee, 0)
6021 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
// Make sure our post-push balance can pay the initial commitment-tx fee (plus anchor
// value, if any) assuming MIN_AFFORDABLE_HTLC_COUNT HTLCs.
6023 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6024 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6025 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6026 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
// Randomize the secp context with fresh entropy as a side-channel hardening measure.
6029 let mut secp_ctx = Secp256k1::new();
6030 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
// Optionally commit to an upfront shutdown script, checking it is compatible with the
// peer's advertised features before we commit to it.
6032 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6033 match signer_provider.get_shutdown_scriptpubkey() {
6034 Ok(scriptpubkey) => Some(scriptpubkey),
6035 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6039 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6040 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6041 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6045 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6046 Ok(script) => script,
6047 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6050 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
// All validation passed: build the (still-unfunded) channel context. Counterparty-side
// fields (reserve, dust limit, pubkeys, minimum_depth, ...) remain placeholders until
// `accept_channel`; the channel_id is the temporary id until funding is created.
6053 context: ChannelContext {
6056 config: LegacyChannelConfig {
6057 options: config.channel_config.clone(),
6058 announced_channel: config.channel_handshake_config.announced_channel,
6059 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6064 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6066 channel_id: temporary_channel_id,
6067 temporary_channel_id: Some(temporary_channel_id),
6068 channel_state: ChannelState::OurInitSent as u32,
6069 announcement_sigs_state: AnnouncementSigsState::NotSent,
6071 channel_value_satoshis,
6073 latest_monitor_update_id: 0,
6075 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6076 shutdown_scriptpubkey,
6079 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6080 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6083 pending_inbound_htlcs: Vec::new(),
6084 pending_outbound_htlcs: Vec::new(),
6085 holding_cell_htlc_updates: Vec::new(),
6086 pending_update_fee: None,
6087 holding_cell_update_fee: None,
6088 next_holder_htlc_id: 0,
6089 next_counterparty_htlc_id: 0,
6090 update_time_counter: 1,
6092 resend_order: RAACommitmentOrder::CommitmentFirst,
6094 monitor_pending_channel_ready: false,
6095 monitor_pending_revoke_and_ack: false,
6096 monitor_pending_commitment_signed: false,
6097 monitor_pending_forwards: Vec::new(),
6098 monitor_pending_failures: Vec::new(),
6099 monitor_pending_finalized_fulfills: Vec::new(),
6101 signer_pending_commitment_update: false,
6102 signer_pending_funding: false,
// Debug-only trackers of the maximum balances seen on each commitment transaction.
6104 #[cfg(debug_assertions)]
6105 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6106 #[cfg(debug_assertions)]
6107 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6109 last_sent_closing_fee: None,
6110 pending_counterparty_closing_signed: None,
6111 expecting_peer_commitment_signed: false,
6112 closing_fee_limits: None,
6113 target_closing_feerate_sats_per_kw: None,
6115 funding_tx_confirmed_in: None,
6116 funding_tx_confirmation_height: 0,
6117 short_channel_id: None,
6118 channel_creation_height: current_chain_height,
6120 feerate_per_kw: commitment_feerate,
6121 counterparty_dust_limit_satoshis: 0,
6122 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6123 counterparty_max_htlc_value_in_flight_msat: 0,
6124 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6125 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6126 holder_selected_channel_reserve_satoshis,
6127 counterparty_htlc_minimum_msat: 0,
// A zero htlc_minimum_msat is bumped to 1 so we never accept dust-free zero-value HTLCs.
6128 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6129 counterparty_max_accepted_htlcs: 0,
6130 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6131 minimum_depth: None, // Filled in in accept_channel
6133 counterparty_forwarding_info: None,
6135 channel_transaction_parameters: ChannelTransactionParameters {
6136 holder_pubkeys: pubkeys,
6137 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6138 is_outbound_from_holder: true,
6139 counterparty_parameters: None,
6140 funding_outpoint: None,
6141 channel_type_features: channel_type.clone()
6143 funding_transaction: None,
6144 is_batch_funding: None,
6146 counterparty_cur_commitment_point: None,
6147 counterparty_prev_commitment_point: None,
6148 counterparty_node_id,
6150 counterparty_shutdown_scriptpubkey: None,
6152 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6154 channel_update_status: ChannelUpdateStatus::Enabled,
6155 closing_signed_in_flight: false,
6157 announcement_sigs: None,
6159 #[cfg(any(test, fuzzing))]
6160 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6161 #[cfg(any(test, fuzzing))]
6162 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6164 workaround_lnd_bug_4006: None,
6165 sent_message_awaiting_response: None,
6167 latest_inbound_scid_alias: None,
6168 outbound_scid_alias,
6170 channel_pending_event_emitted: false,
6171 channel_ready_event_emitted: false,
6173 #[cfg(any(test, fuzzing))]
6174 historical_inbound_htlc_fulfills: HashSet::new(),
6179 blocked_monitor_updates: Vec::new(),
6181 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6185 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6186 /// a funding_created message for the remote peer.
6187 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6188 /// or if called on an inbound channel.
6189 /// Note that channel_id changes during this call!
6190 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6191 /// If an Err is returned, it is a ChannelError::Close.
6192 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6193 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6194 if !self.context.is_outbound() {
6195 panic!("Tried to create outbound funding_created message on an inbound channel!");
// Must be exactly post-handshake: both init flags set and no further progress.
6197 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6198 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
// No commitment state may have been exchanged yet in either direction.
6200 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6201 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6202 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6203 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
// Record the funding outpoint and hand the final channel parameters to the signer.
6206 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6207 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6209 // Now that we're past error-generating stuff, update our local state:
6211 self.context.channel_state = ChannelState::FundingCreated as u32;
// The real channel_id is derived from the funding outpoint, replacing the temporary id.
6212 self.context.channel_id = funding_txo.to_channel_id();
6214 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6215 // We can skip this if it is a zero-conf channel.
6216 if funding_transaction.is_coin_base() &&
6217 self.context.minimum_depth.unwrap_or(0) > 0 &&
6218 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6219 self.context.minimum_depth = Some(COINBASE_MATURITY);
6222 self.context.funding_transaction = Some(funding_transaction);
6223 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
// The signer may not be able to sign synchronously; if so we return `None` for the
// message and mark `signer_pending_funding` so the message is produced later.
6225 let funding_created = self.context.get_funding_created_msg(logger);
6226 if funding_created.is_none() {
6227 if !self.context.signer_pending_funding {
6228 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6229 self.context.signer_pending_funding = true;
// Promote this unfunded channel into a full `Channel`, consuming `self`.
6233 let channel = Channel {
6234 context: self.context,
6237 Ok((channel, funding_created))
/// Picks the channel type we propose in our `open_channel` message based on our config
/// and the peer's advertised features. If the peer rejects it, the type may later be
/// downgraded step-by-step via `maybe_handle_error_without_close`.
6240 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6241 // The default channel type (ie the first one we try) depends on whether the channel is
6242 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6243 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6244 // with no other changes, and fall back to `only_static_remotekey`.
6245 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6246 if !config.channel_handshake_config.announced_channel &&
6247 config.channel_handshake_config.negotiate_scid_privacy &&
6248 their_features.supports_scid_privacy() {
6249 ret.set_scid_privacy_required();
6252 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6253 // set it now. If they don't understand it, we'll fall back to our default of
6254 // `only_static_remotekey`.
6255 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6256 their_features.supports_anchors_zero_fee_htlc_tx() {
6257 ret.set_anchors_zero_fee_htlc_tx_required();
6263 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6264 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6265 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6266 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6267 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6268 ) -> Result<msgs::OpenChannel, ()>
6270 F::Target: FeeEstimator
// Retrying only makes sense on an outbound channel that hasn't progressed past our
// initial `open_channel` send.
6272 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6273 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6274 // We've exhausted our options
6277 // We support opening a few different types of channels. Try removing our additional
6278 // features one by one until we've either arrived at our default or the counterparty has
6281 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6282 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6283 // checks whether the counterparty supports every feature, this would only happen if the
6284 // counterparty is advertising the feature, but rejecting channels proposing the feature for
// Downgrade order: drop anchors first (also resetting our feerate to the non-anchor
// estimate), then scid_privacy, finally the `only_static_remote_key` baseline.
6286 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6287 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6288 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6289 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6290 } else if self.context.channel_type.supports_scid_privacy() {
6291 self.context.channel_type.clear_scid_privacy();
6293 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
// Keep the transaction parameters in sync with the downgraded type, then re-propose.
6295 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6296 Ok(self.get_open_channel(chain_hash))
/// Builds the `open_channel` message proposing this channel to the counterparty.
///
/// Panics if called on an inbound channel, after the handshake has progressed past
/// `OurInitSent`, or after the holder commitment number has advanced.
6299 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6300 if !self.context.is_outbound() {
6301 panic!("Tried to open a channel for an inbound channel?");
6303 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6304 panic!("Cannot generate an open_channel after we've moved forward");
6307 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6308 panic!("Tried to send an open_channel for a channel that has already advanced");
6311 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6312 let keys = self.context.get_holder_pubkeys();
// Before funding, `channel_id` still holds the temporary channel id.
6316 temporary_channel_id: self.context.channel_id,
6317 funding_satoshis: self.context.channel_value_satoshis,
6318 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6319 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6320 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6321 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6322 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6323 feerate_per_kw: self.context.feerate_per_kw as u32,
6324 to_self_delay: self.context.get_holder_selected_contest_delay(),
6325 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6326 funding_pubkey: keys.funding_pubkey,
6327 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6328 payment_point: keys.payment_point,
6329 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6330 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6331 first_per_commitment_point,
// Bit 0 of channel_flags signals whether we want the channel publicly announced.
6332 channel_flags: if self.context.config.announced_channel {1} else {0},
// A zero-length script is how we opt out of committing to an upfront shutdown script.
6333 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6334 Some(script) => script.clone().into_inner(),
6335 None => Builder::new().into_script(),
6337 channel_type: Some(self.context.channel_type.clone()),
/// Handles the counterparty's `accept_channel` response to our `open_channel`.
///
/// Validates the message fields first against protocol-level sanity requirements and
/// then against our (possibly overridden) user-configured handshake limits, storing
/// the negotiated counterparty parameters on success and advancing the channel state
/// to `OurInitSent | TheirInitSent`. Any failure returns a `ChannelError::Close`.
6342 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
// Prefer the per-channel handshake-limit override captured at `new()` time, if any.
6343 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6345 // Check sanity of message fields:
6346 if !self.context.is_outbound() {
6347 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6349 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6350 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6352 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6353 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6355 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6356 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6358 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6359 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6361 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6362 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6363 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6365 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6366 if msg.htlc_minimum_msat >= full_channel_value_msat {
6367 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6369 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6370 if msg.to_self_delay > max_delay_acceptable {
6371 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6373 if msg.max_accepted_htlcs < 1 {
6374 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6376 if msg.max_accepted_htlcs > MAX_HTLCS {
6377 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6380 // Now check against optional parameters as set by config...
6381 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6382 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6384 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6385 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6387 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6388 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6390 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6391 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6393 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6394 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6396 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6397 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6399 if msg.minimum_depth > peer_limits.max_minimum_depth {
6400 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
// The accepted channel type must exactly match what we proposed; an omitted type is
// only acceptable if it implies our `only_static_remote_key` baseline.
6403 if let Some(ty) = &msg.channel_type {
6404 if *ty != self.context.channel_type {
6405 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6407 } else if their_features.supports_channel_type() {
6408 // Assume they've accepted the channel type as they said they understand it.
6410 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6411 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6412 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6414 self.context.channel_type = channel_type.clone();
6415 self.context.channel_transaction_parameters.channel_type_features = channel_type;
// Record the peer's upfront shutdown script, if it signaled support for the feature.
6418 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6419 match &msg.shutdown_scriptpubkey {
6420 &Some(ref script) => {
6421 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6422 if script.len() == 0 {
6425 if !script::is_bolt2_compliant(&script, their_features) {
6426 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6428 Some(script.clone())
6431 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6433 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
// All checks passed: persist the negotiated counterparty parameters.
6438 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6439 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6440 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6441 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6442 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
// If we trust our own 0-conf funding, take the peer's minimum_depth as-is (possibly
// zero); otherwise require at least one confirmation.
6444 if peer_limits.trust_own_funding_0conf {
6445 self.context.minimum_depth = Some(msg.minimum_depth);
6447 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6450 let counterparty_pubkeys = ChannelPublicKeys {
6451 funding_pubkey: msg.funding_pubkey,
6452 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6453 payment_point: msg.payment_point,
6454 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6455 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6458 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6459 selected_contest_delay: msg.to_self_delay,
6460 pubkeys: counterparty_pubkeys,
6463 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6464 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6466 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6467 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6473 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6474 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
// Channel state shared with funded channels; promoted into a full `Channel` once
// funding negotiation completes.
6475 pub context: ChannelContext<SP>,
// State tracked only while the channel remains unfunded.
6476 pub unfunded_context: UnfundedChannelContext,
6479 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6480 /// Creates a new channel from a remote sides' request for one.
6481 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6482 pub fn new<ES: Deref, F: Deref, L: Deref>(
6483 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6484 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6485 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6486 current_chain_height: u32, logger: &L, is_0conf: bool,
6487 ) -> Result<InboundV1Channel<SP>, ChannelError>
6488 where ES::Target: EntropySource,
6489 F::Target: FeeEstimator,
6492 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6493 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6495 // First check the channel type is known, failing before we do anything else if we don't
6496 // support this channel type.
6497 let channel_type = if let Some(channel_type) = &msg.channel_type {
6498 if channel_type.supports_any_optional_bits() {
6499 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6502 // We only support the channel types defined by the `ChannelManager` in
6503 // `provided_channel_type_features`. The channel type must always support
6504 // `static_remote_key`.
6505 if !channel_type.requires_static_remote_key() {
6506 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6508 // Make sure we support all of the features behind the channel type.
6509 if !channel_type.is_subset(our_supported_features) {
6510 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6512 if channel_type.requires_scid_privacy() && announced_channel {
6513 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6515 channel_type.clone()
6517 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6518 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6519 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6524 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6525 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6526 let pubkeys = holder_signer.pubkeys().clone();
6527 let counterparty_pubkeys = ChannelPublicKeys {
6528 funding_pubkey: msg.funding_pubkey,
6529 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6530 payment_point: msg.payment_point,
6531 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6532 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6535 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6536 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6539 // Check sanity of message fields:
6540 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6541 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6543 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6544 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6546 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6547 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6549 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6550 if msg.push_msat > full_channel_value_msat {
6551 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6553 if msg.dust_limit_satoshis > msg.funding_satoshis {
6554 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6556 if msg.htlc_minimum_msat >= full_channel_value_msat {
6557 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6559 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6561 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6562 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6563 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6565 if msg.max_accepted_htlcs < 1 {
6566 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6568 if msg.max_accepted_htlcs > MAX_HTLCS {
6569 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6572 // Now check against optional parameters as set by config...
6573 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6574 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6576 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6577 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6579 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6580 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6582 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6583 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6585 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6586 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6588 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6589 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6591 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6592 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6595 // Convert things into internal flags and prep our state:
6597 if config.channel_handshake_limits.force_announced_channel_preference {
6598 if config.channel_handshake_config.announced_channel != announced_channel {
6599 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6603 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6604 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6605 // Protocol level safety check in place, although it should never happen because
6606 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6607 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6609 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6610 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6612 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6613 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6614 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6616 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6617 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6620 // check if the funder's amount for the initial commitment tx is sufficient
6621 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6622 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6623 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6627 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6628 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6629 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6630 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6633 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6634 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6635 // want to push much to us), our counterparty should always have more than our reserve.
6636 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6637 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6640 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6641 match &msg.shutdown_scriptpubkey {
6642 &Some(ref script) => {
6643 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6644 if script.len() == 0 {
6647 if !script::is_bolt2_compliant(&script, their_features) {
6648 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6650 Some(script.clone())
6653 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). Peer looks buggy, so we fail the channel.
6655 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6660 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6661 match signer_provider.get_shutdown_scriptpubkey() {
6662 Ok(scriptpubkey) => Some(scriptpubkey),
6663 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6667 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6668 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6669 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6673 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6674 Ok(script) => script,
6675 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6678 let mut secp_ctx = Secp256k1::new();
6679 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6681 let minimum_depth = if is_0conf {
6684 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6688 context: ChannelContext {
6691 config: LegacyChannelConfig {
6692 options: config.channel_config.clone(),
6694 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6699 inbound_handshake_limits_override: None,
6701 temporary_channel_id: Some(msg.temporary_channel_id),
6702 channel_id: msg.temporary_channel_id,
6703 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6704 announcement_sigs_state: AnnouncementSigsState::NotSent,
6707 latest_monitor_update_id: 0,
6709 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6710 shutdown_scriptpubkey,
6713 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6714 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6715 value_to_self_msat: msg.push_msat,
6717 pending_inbound_htlcs: Vec::new(),
6718 pending_outbound_htlcs: Vec::new(),
6719 holding_cell_htlc_updates: Vec::new(),
6720 pending_update_fee: None,
6721 holding_cell_update_fee: None,
6722 next_holder_htlc_id: 0,
6723 next_counterparty_htlc_id: 0,
6724 update_time_counter: 1,
6726 resend_order: RAACommitmentOrder::CommitmentFirst,
6728 monitor_pending_channel_ready: false,
6729 monitor_pending_revoke_and_ack: false,
6730 monitor_pending_commitment_signed: false,
6731 monitor_pending_forwards: Vec::new(),
6732 monitor_pending_failures: Vec::new(),
6733 monitor_pending_finalized_fulfills: Vec::new(),
6735 signer_pending_commitment_update: false,
6736 signer_pending_funding: false,
6738 #[cfg(debug_assertions)]
6739 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6740 #[cfg(debug_assertions)]
6741 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6743 last_sent_closing_fee: None,
6744 pending_counterparty_closing_signed: None,
6745 expecting_peer_commitment_signed: false,
6746 closing_fee_limits: None,
6747 target_closing_feerate_sats_per_kw: None,
6749 funding_tx_confirmed_in: None,
6750 funding_tx_confirmation_height: 0,
6751 short_channel_id: None,
6752 channel_creation_height: current_chain_height,
6754 feerate_per_kw: msg.feerate_per_kw,
6755 channel_value_satoshis: msg.funding_satoshis,
6756 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6757 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6758 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6759 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6760 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6761 holder_selected_channel_reserve_satoshis,
6762 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6763 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6764 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6765 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6768 counterparty_forwarding_info: None,
6770 channel_transaction_parameters: ChannelTransactionParameters {
6771 holder_pubkeys: pubkeys,
6772 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6773 is_outbound_from_holder: false,
6774 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6775 selected_contest_delay: msg.to_self_delay,
6776 pubkeys: counterparty_pubkeys,
6778 funding_outpoint: None,
6779 channel_type_features: channel_type.clone()
6781 funding_transaction: None,
6782 is_batch_funding: None,
6784 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6785 counterparty_prev_commitment_point: None,
6786 counterparty_node_id,
6788 counterparty_shutdown_scriptpubkey,
6790 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6792 channel_update_status: ChannelUpdateStatus::Enabled,
6793 closing_signed_in_flight: false,
6795 announcement_sigs: None,
6797 #[cfg(any(test, fuzzing))]
6798 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6799 #[cfg(any(test, fuzzing))]
6800 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6802 workaround_lnd_bug_4006: None,
6803 sent_message_awaiting_response: None,
6805 latest_inbound_scid_alias: None,
6806 outbound_scid_alias: 0,
6808 channel_pending_event_emitted: false,
6809 channel_ready_event_emitted: false,
6811 #[cfg(any(test, fuzzing))]
6812 historical_inbound_htlc_fulfills: HashSet::new(),
6817 blocked_monitor_updates: Vec::new(),
6819 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6825 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6826 /// should be sent back to the counterparty node.
///
/// Panics if this is an outbound channel, if the channel has moved past the initial
/// init-exchange state, or if the holder commitment number has already advanced — all of
/// which indicate a local state-machine bug rather than misbehavior by the peer.
6828 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6829 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
// accept_channel is only ever a response to an inbound open_channel.
6830 if self.context.is_outbound() {
6831 panic!("Tried to send accept_channel for an outbound channel?");
// Both sides' init messages have been exchanged and nothing further has happened —
// the only state in which sending accept_channel is valid.
6833 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6834 panic!("Tried to send accept_channel after channel had moved forward");
// The first holder commitment number must still be unused at this point.
6836 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6837 panic!("Tried to send an accept_channel for a channel that has already advanced");
6840 self.generate_accept_channel_message()
6843 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6844 /// inbound channel. If the intention is to accept an inbound channel, use
6845 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6847 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6848 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
// Ask the signer for the per-commitment point of our first (initial) holder commitment.
6849 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6850 let keys = self.context.get_holder_pubkeys();
// Echo back our side's limits and basepoints; the temporary channel id is still the one
// the peer picked in open_channel at this stage.
6852 msgs::AcceptChannel {
6853 temporary_channel_id: self.context.channel_id,
6854 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6855 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6856 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6857 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
// `minimum_depth` is populated when the inbound channel is constructed, so this
// unwrap should not fail — TODO(review): confirm for all construction paths.
6858 minimum_depth: self.context.minimum_depth.unwrap(),
6859 to_self_delay: self.context.get_holder_selected_contest_delay(),
6860 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6861 funding_pubkey: keys.funding_pubkey,
6862 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6863 payment_point: keys.payment_point,
6864 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6865 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6866 first_per_commitment_point,
// A zero-length script signals that we are not committing to an upfront shutdown
// script (mirroring how we treat a 0-length script from the peer as an opt-out).
6867 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6868 Some(script) => script.clone().into_inner(),
6869 None => Builder::new().into_script(),
6871 channel_type: Some(self.context.channel_type.clone()),
6873 next_local_nonce: None,
6877 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6878 /// inbound channel without accepting it.
///
/// Unlike [`InboundV1Channel::accept_inbound_channel`] this performs no state checks and
/// does not mutate the channel.
6880 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6882 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6883 self.generate_accept_channel_message()
/// Verifies the counterparty's signature (received in their `funding_created` message) over
/// our initial holder commitment transaction.
///
/// Returns the freshly-built initial [`CommitmentTransaction`] on success, or fails the
/// channel (via `secp_check!`) if the signature does not verify against the counterparty's
/// funding pubkey.
6886 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6887 let funding_script = self.context.get_funding_redeemscript();
// Build our own view of the initial holder commitment transaction and compute the
// sighash the counterparty should have signed.
6889 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6890 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6891 let trusted_tx = initial_commitment_tx.trust();
6892 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6893 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6894 // They sign the holder commitment transaction...
6895 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6896 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6897 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6898 encode::serialize_hex(&funding_script), &self.context.channel_id());
6899 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6901 Ok(initial_commitment_tx)
/// Handles the counterparty's `funding_created` message: records the funding outpoint,
/// verifies their signature on our initial commitment transaction, creates the
/// [`ChannelMonitor`], and promotes this `InboundV1Channel` into a full [`Channel`].
///
/// On success, also returns the `funding_signed` message to send back — `None` if the
/// signer has not yet produced a signature (we are still waiting on it).
///
/// On recoverable failure, returns `(self, ChannelError)` so the caller retains the
/// (unmodified, as far as observable) unfunded channel.
6904 pub fn funding_created<L: Deref>(
6905 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6906 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
// funding_created only flows from the channel funder to the fundee.
6910 if self.context.is_outbound() {
6911 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6913 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6914 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6915 // remember the channel, so it's safe to just send an error_message here and drop the
6917 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
// A fresh channel must not have seen any revocation secrets nor advanced either
// commitment number yet; anything else is a local state-machine bug.
6919 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6920 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6921 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6922 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6925 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6926 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6927 // This is an externally observable change before we finish all our checks. In particular
6928 // check_funding_created_signature may fail.
6929 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6931 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
// Roll back the funding outpoint we optimistically set above before handing the
// error (and ourselves) back to the caller.
6933 Err(ChannelError::Close(e)) => {
6934 self.context.channel_transaction_parameters.funding_outpoint = None;
6935 return Err((self, ChannelError::Close(e)));
6938 // The only error we know how to handle is ChannelError::Close, so we fall over here
6939 // to make sure we don't continue with an inconsistent state.
6940 panic!("unexpected error type from check_funding_created_signature {:?}", e);
// Hand the (verified) counterparty signature and our commitment transaction to the
// signer for validation before we commit to anything.
6944 let holder_commitment_tx = HolderCommitmentTransaction::new(
6945 initial_commitment_tx,
6948 &self.context.get_holder_pubkeys().funding_pubkey,
6949 self.context.counterparty_funding_pubkey()
6952 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6953 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6956 // Now that we're past error-generating stuff, update our local state:
6958 self.context.channel_state = ChannelState::FundingSent as u32;
// The real channel id replaces the temporary one once the funding outpoint is known.
6959 self.context.channel_id = funding_txo.to_channel_id();
6960 self.context.cur_counterparty_commitment_transaction_number -= 1;
6961 self.context.cur_holder_commitment_transaction_number -= 1;
6963 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6965 let funding_redeemscript = self.context.get_funding_redeemscript();
6966 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6967 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6968 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
// The monitor gets its own signer instance, configured with the same channel parameters.
6969 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6970 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6971 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6972 shutdown_script, self.context.get_holder_selected_contest_delay(),
6973 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6974 &self.context.channel_transaction_parameters,
6975 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6977 holder_commitment_tx, best_block, self.context.counterparty_node_id);
// Seed the monitor with the counterparty's initial commitment transaction data.
6978 channel_monitor.provide_initial_counterparty_commitment_tx(
6979 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6980 self.context.cur_counterparty_commitment_transaction_number + 1,
6981 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6982 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6983 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6985 log_info!(logger, "{} funding_signed for peer for channel {}",
6986 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
6988 // Promote the channel to a full-fledged one now that we have updated the state and have a
6989 // `ChannelMonitor`.
6990 let mut channel = Channel {
6991 context: self.context,
6993 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6994 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6996 Ok((channel, funding_signed, channel_monitor))
// Version bytes used when (de)serializing `Channel`; written via `write_ver_prefix!` in the
// `Writeable` impl below. Readers refuse data newer than their supported version.
// NOTE(review): exact enforcement semantics live in the ver-prefix macros — confirm there.
7000 const SERIALIZATION_VERSION: u8 = 3;
7001 const MIN_SERIALIZATION_VERSION: u8 = 3;
// TLV-based `Writeable`/`Readable` derivation for `InboundHTLCRemovalReason`.
7003 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
// `ChannelUpdateStatus` is persisted as a single byte: 0 = last announced as enabled,
// 1 = last announced as disabled. Staged transitions collapse to their announced state.
7009 impl Writeable for ChannelUpdateStatus {
7010 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7011 // We only care about writing out the current state as it was announced, ie only either
7012 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7013 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7015 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7016 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7017 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7018 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7024 impl Readable for ChannelUpdateStatus {
7025 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
// Inverse of the `Writeable` impl above: only the two announced states (0/1) are ever
// written, so any other byte indicates corrupt data.
7026 Ok(match <u8 as Readable>::read(reader)? {
7027 0 => ChannelUpdateStatus::Enabled,
7028 1 => ChannelUpdateStatus::Disabled,
7029 _ => return Err(DecodeError::InvalidValue),
// `AnnouncementSigsState` is persisted as a single byte: 1 only for `PeerReceived`,
// 0 for every other state (they all reset to `NotSent` on disconnect anyway).
7034 impl Writeable for AnnouncementSigsState {
7035 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7036 // We only care about writing out the current state as if we had just disconnected, at
7037 // which point we always set anything but PeerReceived to NotSent.
7039 AnnouncementSigsState::NotSent => 0u8.write(writer),
7040 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7041 AnnouncementSigsState::Committed => 0u8.write(writer),
7042 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7047 impl Readable for AnnouncementSigsState {
7048 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
// Inverse of the `Writeable` impl above; only 0 (NotSent) and 1 (PeerReceived) are
// ever written, so anything else is invalid.
7049 Ok(match <u8 as Readable>::read(reader)? {
7050 0 => AnnouncementSigsState::NotSent,
7051 1 => AnnouncementSigsState::PeerReceived,
7052 _ => return Err(DecodeError::InvalidValue),
7057 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7058 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7059 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7062 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7064 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7065 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7066 // the low bytes now and the optional high bytes later.
7067 let user_id_low = self.context.user_id as u64;
7068 user_id_low.write(writer)?;
7070 // Version 1 deserializers expected to read parts of the config object here. Version 2
7071 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7072 // `minimum_depth` we simply write dummy values here.
7073 writer.write_all(&[0; 8])?;
7075 self.context.channel_id.write(writer)?;
7076 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7077 self.context.channel_value_satoshis.write(writer)?;
7079 self.context.latest_monitor_update_id.write(writer)?;
7081 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7082 // deserialized from that format.
7083 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7084 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7085 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7087 self.context.destination_script.write(writer)?;
7089 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7090 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7091 self.context.value_to_self_msat.write(writer)?;
7093 let mut dropped_inbound_htlcs = 0;
7094 for htlc in self.context.pending_inbound_htlcs.iter() {
7095 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7096 dropped_inbound_htlcs += 1;
7099 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7100 for htlc in self.context.pending_inbound_htlcs.iter() {
7101 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7104 htlc.htlc_id.write(writer)?;
7105 htlc.amount_msat.write(writer)?;
7106 htlc.cltv_expiry.write(writer)?;
7107 htlc.payment_hash.write(writer)?;
7109 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7110 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7112 htlc_state.write(writer)?;
7114 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7116 htlc_state.write(writer)?;
7118 &InboundHTLCState::Committed => {
7121 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7123 removal_reason.write(writer)?;
7128 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7129 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7130 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7132 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7133 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7134 htlc.htlc_id.write(writer)?;
7135 htlc.amount_msat.write(writer)?;
7136 htlc.cltv_expiry.write(writer)?;
7137 htlc.payment_hash.write(writer)?;
7138 htlc.source.write(writer)?;
7140 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7142 onion_packet.write(writer)?;
7144 &OutboundHTLCState::Committed => {
7147 &OutboundHTLCState::RemoteRemoved(_) => {
7148 // Treat this as a Committed because we haven't received the CS - they'll
7149 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7152 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7154 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7155 preimages.push(preimage);
7157 let reason: Option<&HTLCFailReason> = outcome.into();
7158 reason.write(writer)?;
7160 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7162 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7163 preimages.push(preimage);
7165 let reason: Option<&HTLCFailReason> = outcome.into();
7166 reason.write(writer)?;
7169 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7170 if pending_outbound_skimmed_fees.is_empty() {
7171 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7173 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7174 } else if !pending_outbound_skimmed_fees.is_empty() {
7175 pending_outbound_skimmed_fees.push(None);
7177 pending_outbound_blinding_points.push(htlc.blinding_point);
7180 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7181 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7182 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7183 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7185 &HTLCUpdateAwaitingACK::AddHTLC {
7186 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7187 blinding_point, skimmed_fee_msat,
7190 amount_msat.write(writer)?;
7191 cltv_expiry.write(writer)?;
7192 payment_hash.write(writer)?;
7193 source.write(writer)?;
7194 onion_routing_packet.write(writer)?;
7196 if let Some(skimmed_fee) = skimmed_fee_msat {
7197 if holding_cell_skimmed_fees.is_empty() {
7198 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7200 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7201 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7203 holding_cell_blinding_points.push(blinding_point);
7205 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7207 payment_preimage.write(writer)?;
7208 htlc_id.write(writer)?;
7210 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7212 htlc_id.write(writer)?;
7213 err_packet.write(writer)?;
7218 match self.context.resend_order {
7219 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7220 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7223 self.context.monitor_pending_channel_ready.write(writer)?;
7224 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7225 self.context.monitor_pending_commitment_signed.write(writer)?;
7227 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7228 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7229 pending_forward.write(writer)?;
7230 htlc_id.write(writer)?;
7233 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7234 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7235 htlc_source.write(writer)?;
7236 payment_hash.write(writer)?;
7237 fail_reason.write(writer)?;
7240 if self.context.is_outbound() {
7241 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7242 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7243 Some(feerate).write(writer)?;
7245 // As for inbound HTLCs, if the update was only announced and never committed in a
7246 // commitment_signed, drop it.
7247 None::<u32>.write(writer)?;
7249 self.context.holding_cell_update_fee.write(writer)?;
7251 self.context.next_holder_htlc_id.write(writer)?;
7252 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7253 self.context.update_time_counter.write(writer)?;
7254 self.context.feerate_per_kw.write(writer)?;
7256 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7257 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7258 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7259 // consider the stale state on reload.
7262 self.context.funding_tx_confirmed_in.write(writer)?;
7263 self.context.funding_tx_confirmation_height.write(writer)?;
7264 self.context.short_channel_id.write(writer)?;
7266 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7267 self.context.holder_dust_limit_satoshis.write(writer)?;
7268 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7270 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7271 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7273 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7274 self.context.holder_htlc_minimum_msat.write(writer)?;
7275 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7277 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7278 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7280 match &self.context.counterparty_forwarding_info {
7283 info.fee_base_msat.write(writer)?;
7284 info.fee_proportional_millionths.write(writer)?;
7285 info.cltv_expiry_delta.write(writer)?;
7287 None => 0u8.write(writer)?
7290 self.context.channel_transaction_parameters.write(writer)?;
7291 self.context.funding_transaction.write(writer)?;
7293 self.context.counterparty_cur_commitment_point.write(writer)?;
7294 self.context.counterparty_prev_commitment_point.write(writer)?;
7295 self.context.counterparty_node_id.write(writer)?;
7297 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7299 self.context.commitment_secrets.write(writer)?;
7301 self.context.channel_update_status.write(writer)?;
7303 #[cfg(any(test, fuzzing))]
7304 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7305 #[cfg(any(test, fuzzing))]
7306 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7307 htlc.write(writer)?;
7310 // If the channel type is something other than only-static-remote-key, then we need to have
7311 // older clients fail to deserialize this channel at all. If the type is
7312 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7314 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7315 Some(&self.context.channel_type) } else { None };
7317 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7318 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7319 // a different percentage of the channel value than 10%, which older versions of LDK used
7320 // to set it to before the percentage was made configurable.
7321 let serialized_holder_selected_reserve =
7322 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7323 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7325 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7326 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7327 let serialized_holder_htlc_max_in_flight =
7328 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7329 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7331 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7332 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7334 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7335 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7336 // we write the high bytes as an option here.
7337 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7339 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7341 write_tlv_fields!(writer, {
7342 (0, self.context.announcement_sigs, option),
7343 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7344 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7345 // them twice, once with their original default values above, and once as an option
7346 // here. On the read side, old versions will simply ignore the odd-type entries here,
7347 // and new versions map the default values to None and allow the TLV entries here to
7349 (1, self.context.minimum_depth, option),
7350 (2, chan_type, option),
7351 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7352 (4, serialized_holder_selected_reserve, option),
7353 (5, self.context.config, required),
7354 (6, serialized_holder_htlc_max_in_flight, option),
7355 (7, self.context.shutdown_scriptpubkey, option),
7356 (8, self.context.blocked_monitor_updates, optional_vec),
7357 (9, self.context.target_closing_feerate_sats_per_kw, option),
7358 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7359 (13, self.context.channel_creation_height, required),
7360 (15, preimages, required_vec),
7361 (17, self.context.announcement_sigs_state, required),
7362 (19, self.context.latest_inbound_scid_alias, option),
7363 (21, self.context.outbound_scid_alias, required),
7364 (23, channel_ready_event_emitted, option),
7365 (25, user_id_high_opt, option),
7366 (27, self.context.channel_keys_id, required),
7367 (28, holder_max_accepted_htlcs, option),
7368 (29, self.context.temporary_channel_id, option),
7369 (31, channel_pending_event_emitted, option),
7370 (35, pending_outbound_skimmed_fees, optional_vec),
7371 (37, holding_cell_skimmed_fees, optional_vec),
7372 (38, self.context.is_batch_funding, option),
7373 (39, pending_outbound_blinding_points, optional_vec),
7374 (41, holding_cell_blinding_points, optional_vec),
7381 const MAX_ALLOC_SIZE: usize = 64*1024;
7382 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7384 ES::Target: EntropySource,
7385 SP::Target: SignerProvider
7387 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7388 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7389 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7391 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7392 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7393 // the low bytes now and the high bytes later.
7394 let user_id_low: u64 = Readable::read(reader)?;
7396 let mut config = Some(LegacyChannelConfig::default());
7398 // Read the old serialization of the ChannelConfig from version 0.0.98.
7399 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7400 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7401 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7402 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7404 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7405 let mut _val: u64 = Readable::read(reader)?;
7408 let channel_id = Readable::read(reader)?;
7409 let channel_state = Readable::read(reader)?;
7410 let channel_value_satoshis = Readable::read(reader)?;
7412 let latest_monitor_update_id = Readable::read(reader)?;
7414 let mut keys_data = None;
7416 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7417 // the `channel_keys_id` TLV is present below.
7418 let keys_len: u32 = Readable::read(reader)?;
7419 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7420 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7421 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7422 let mut data = [0; 1024];
7423 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7424 reader.read_exact(read_slice)?;
7425 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7429 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7430 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7431 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7434 let destination_script = Readable::read(reader)?;
7436 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7437 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7438 let value_to_self_msat = Readable::read(reader)?;
7440 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7442 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7443 for _ in 0..pending_inbound_htlc_count {
7444 pending_inbound_htlcs.push(InboundHTLCOutput {
7445 htlc_id: Readable::read(reader)?,
7446 amount_msat: Readable::read(reader)?,
7447 cltv_expiry: Readable::read(reader)?,
7448 payment_hash: Readable::read(reader)?,
7449 state: match <u8 as Readable>::read(reader)? {
7450 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7451 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7452 3 => InboundHTLCState::Committed,
7453 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7454 _ => return Err(DecodeError::InvalidValue),
7459 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7460 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7461 for _ in 0..pending_outbound_htlc_count {
7462 pending_outbound_htlcs.push(OutboundHTLCOutput {
7463 htlc_id: Readable::read(reader)?,
7464 amount_msat: Readable::read(reader)?,
7465 cltv_expiry: Readable::read(reader)?,
7466 payment_hash: Readable::read(reader)?,
7467 source: Readable::read(reader)?,
7468 state: match <u8 as Readable>::read(reader)? {
7469 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7470 1 => OutboundHTLCState::Committed,
7472 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7473 OutboundHTLCState::RemoteRemoved(option.into())
7476 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7477 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7480 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7481 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7483 _ => return Err(DecodeError::InvalidValue),
7485 skimmed_fee_msat: None,
7486 blinding_point: None,
7490 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7491 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7492 for _ in 0..holding_cell_htlc_update_count {
7493 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7494 0 => HTLCUpdateAwaitingACK::AddHTLC {
7495 amount_msat: Readable::read(reader)?,
7496 cltv_expiry: Readable::read(reader)?,
7497 payment_hash: Readable::read(reader)?,
7498 source: Readable::read(reader)?,
7499 onion_routing_packet: Readable::read(reader)?,
7500 skimmed_fee_msat: None,
7501 blinding_point: None,
7503 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7504 payment_preimage: Readable::read(reader)?,
7505 htlc_id: Readable::read(reader)?,
7507 2 => HTLCUpdateAwaitingACK::FailHTLC {
7508 htlc_id: Readable::read(reader)?,
7509 err_packet: Readable::read(reader)?,
7511 _ => return Err(DecodeError::InvalidValue),
7515 let resend_order = match <u8 as Readable>::read(reader)? {
7516 0 => RAACommitmentOrder::CommitmentFirst,
7517 1 => RAACommitmentOrder::RevokeAndACKFirst,
7518 _ => return Err(DecodeError::InvalidValue),
7521 let monitor_pending_channel_ready = Readable::read(reader)?;
7522 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7523 let monitor_pending_commitment_signed = Readable::read(reader)?;
7525 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7526 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7527 for _ in 0..monitor_pending_forwards_count {
7528 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7531 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7532 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7533 for _ in 0..monitor_pending_failures_count {
7534 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7537 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7539 let holding_cell_update_fee = Readable::read(reader)?;
7541 let next_holder_htlc_id = Readable::read(reader)?;
7542 let next_counterparty_htlc_id = Readable::read(reader)?;
7543 let update_time_counter = Readable::read(reader)?;
7544 let feerate_per_kw = Readable::read(reader)?;
7546 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7547 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7548 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7549 // consider the stale state on reload.
7550 match <u8 as Readable>::read(reader)? {
7553 let _: u32 = Readable::read(reader)?;
7554 let _: u64 = Readable::read(reader)?;
7555 let _: Signature = Readable::read(reader)?;
7557 _ => return Err(DecodeError::InvalidValue),
7560 let funding_tx_confirmed_in = Readable::read(reader)?;
7561 let funding_tx_confirmation_height = Readable::read(reader)?;
7562 let short_channel_id = Readable::read(reader)?;
7564 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7565 let holder_dust_limit_satoshis = Readable::read(reader)?;
7566 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7567 let mut counterparty_selected_channel_reserve_satoshis = None;
7569 // Read the old serialization from version 0.0.98.
7570 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7572 // Read the 8 bytes of backwards-compatibility data.
7573 let _dummy: u64 = Readable::read(reader)?;
7575 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7576 let holder_htlc_minimum_msat = Readable::read(reader)?;
7577 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7579 let mut minimum_depth = None;
7581 // Read the old serialization from version 0.0.98.
7582 minimum_depth = Some(Readable::read(reader)?);
7584 // Read the 4 bytes of backwards-compatibility data.
7585 let _dummy: u32 = Readable::read(reader)?;
7588 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7590 1 => Some(CounterpartyForwardingInfo {
7591 fee_base_msat: Readable::read(reader)?,
7592 fee_proportional_millionths: Readable::read(reader)?,
7593 cltv_expiry_delta: Readable::read(reader)?,
7595 _ => return Err(DecodeError::InvalidValue),
7598 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7599 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7601 let counterparty_cur_commitment_point = Readable::read(reader)?;
7603 let counterparty_prev_commitment_point = Readable::read(reader)?;
7604 let counterparty_node_id = Readable::read(reader)?;
7606 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7607 let commitment_secrets = Readable::read(reader)?;
7609 let channel_update_status = Readable::read(reader)?;
7611 #[cfg(any(test, fuzzing))]
7612 let mut historical_inbound_htlc_fulfills = HashSet::new();
7613 #[cfg(any(test, fuzzing))]
7615 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7616 for _ in 0..htlc_fulfills_len {
7617 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7621 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7622 Some((feerate, if channel_parameters.is_outbound_from_holder {
7623 FeeUpdateState::Outbound
7625 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7631 let mut announcement_sigs = None;
7632 let mut target_closing_feerate_sats_per_kw = None;
7633 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7634 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7635 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7636 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7637 // only, so we default to that if none was written.
7638 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7639 let mut channel_creation_height = Some(serialized_height);
7640 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7642 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7643 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7644 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7645 let mut latest_inbound_scid_alias = None;
7646 let mut outbound_scid_alias = None;
7647 let mut channel_pending_event_emitted = None;
7648 let mut channel_ready_event_emitted = None;
7650 let mut user_id_high_opt: Option<u64> = None;
7651 let mut channel_keys_id: Option<[u8; 32]> = None;
7652 let mut temporary_channel_id: Option<ChannelId> = None;
7653 let mut holder_max_accepted_htlcs: Option<u16> = None;
7655 let mut blocked_monitor_updates = Some(Vec::new());
7657 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7658 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7660 let mut is_batch_funding: Option<()> = None;
7662 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7663 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7665 read_tlv_fields!(reader, {
7666 (0, announcement_sigs, option),
7667 (1, minimum_depth, option),
7668 (2, channel_type, option),
7669 (3, counterparty_selected_channel_reserve_satoshis, option),
7670 (4, holder_selected_channel_reserve_satoshis, option),
7671 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7672 (6, holder_max_htlc_value_in_flight_msat, option),
7673 (7, shutdown_scriptpubkey, option),
7674 (8, blocked_monitor_updates, optional_vec),
7675 (9, target_closing_feerate_sats_per_kw, option),
7676 (11, monitor_pending_finalized_fulfills, optional_vec),
7677 (13, channel_creation_height, option),
7678 (15, preimages_opt, optional_vec),
7679 (17, announcement_sigs_state, option),
7680 (19, latest_inbound_scid_alias, option),
7681 (21, outbound_scid_alias, option),
7682 (23, channel_ready_event_emitted, option),
7683 (25, user_id_high_opt, option),
7684 (27, channel_keys_id, option),
7685 (28, holder_max_accepted_htlcs, option),
7686 (29, temporary_channel_id, option),
7687 (31, channel_pending_event_emitted, option),
7688 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7689 (37, holding_cell_skimmed_fees_opt, optional_vec),
7690 (38, is_batch_funding, option),
7691 (39, pending_outbound_blinding_points_opt, optional_vec),
7692 (41, holding_cell_blinding_points_opt, optional_vec),
7695 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7696 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7697 // If we've gotten to the funding stage of the channel, populate the signer with its
7698 // required channel parameters.
7699 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7700 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7701 holder_signer.provide_channel_parameters(&channel_parameters);
7703 (channel_keys_id, holder_signer)
7705 // `keys_data` can be `None` if we had corrupted data.
7706 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7707 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7708 (holder_signer.channel_keys_id(), holder_signer)
7711 if let Some(preimages) = preimages_opt {
7712 let mut iter = preimages.into_iter();
7713 for htlc in pending_outbound_htlcs.iter_mut() {
7715 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7716 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7718 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7719 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7724 // We expect all preimages to be consumed above
7725 if iter.next().is_some() {
7726 return Err(DecodeError::InvalidValue);
7730 let chan_features = channel_type.as_ref().unwrap();
7731 if !chan_features.is_subset(our_supported_features) {
7732 // If the channel was written by a new version and negotiated with features we don't
7733 // understand yet, refuse to read it.
7734 return Err(DecodeError::UnknownRequiredFeature);
7737 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7738 // To account for that, we're proactively setting/overriding the field here.
7739 channel_parameters.channel_type_features = chan_features.clone();
7741 let mut secp_ctx = Secp256k1::new();
7742 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7744 // `user_id` used to be a single u64 value. In order to remain backwards
7745 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7746 // separate u64 values.
7747 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7749 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7751 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7752 let mut iter = skimmed_fees.into_iter();
7753 for htlc in pending_outbound_htlcs.iter_mut() {
7754 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7756 // We expect all skimmed fees to be consumed above
7757 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7759 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7760 let mut iter = skimmed_fees.into_iter();
7761 for htlc in holding_cell_htlc_updates.iter_mut() {
7762 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7763 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7766 // We expect all skimmed fees to be consumed above
7767 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7769 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
7770 let mut iter = blinding_pts.into_iter();
7771 for htlc in pending_outbound_htlcs.iter_mut() {
7772 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7774 // We expect all blinding points to be consumed above
7775 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7777 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
7778 let mut iter = blinding_pts.into_iter();
7779 for htlc in holding_cell_htlc_updates.iter_mut() {
7780 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
7781 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7784 // We expect all blinding points to be consumed above
7785 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7789 context: ChannelContext {
7792 config: config.unwrap(),
7796 // Note that we don't care about serializing handshake limits as we only ever serialize
7797 // channel data after the handshake has completed.
7798 inbound_handshake_limits_override: None,
7801 temporary_channel_id,
7803 announcement_sigs_state: announcement_sigs_state.unwrap(),
7805 channel_value_satoshis,
7807 latest_monitor_update_id,
7809 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7810 shutdown_scriptpubkey,
7813 cur_holder_commitment_transaction_number,
7814 cur_counterparty_commitment_transaction_number,
7817 holder_max_accepted_htlcs,
7818 pending_inbound_htlcs,
7819 pending_outbound_htlcs,
7820 holding_cell_htlc_updates,
7824 monitor_pending_channel_ready,
7825 monitor_pending_revoke_and_ack,
7826 monitor_pending_commitment_signed,
7827 monitor_pending_forwards,
7828 monitor_pending_failures,
7829 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7831 signer_pending_commitment_update: false,
7832 signer_pending_funding: false,
7835 holding_cell_update_fee,
7836 next_holder_htlc_id,
7837 next_counterparty_htlc_id,
7838 update_time_counter,
7841 #[cfg(debug_assertions)]
7842 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7843 #[cfg(debug_assertions)]
7844 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7846 last_sent_closing_fee: None,
7847 pending_counterparty_closing_signed: None,
7848 expecting_peer_commitment_signed: false,
7849 closing_fee_limits: None,
7850 target_closing_feerate_sats_per_kw,
7852 funding_tx_confirmed_in,
7853 funding_tx_confirmation_height,
7855 channel_creation_height: channel_creation_height.unwrap(),
7857 counterparty_dust_limit_satoshis,
7858 holder_dust_limit_satoshis,
7859 counterparty_max_htlc_value_in_flight_msat,
7860 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7861 counterparty_selected_channel_reserve_satoshis,
7862 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7863 counterparty_htlc_minimum_msat,
7864 holder_htlc_minimum_msat,
7865 counterparty_max_accepted_htlcs,
7868 counterparty_forwarding_info,
7870 channel_transaction_parameters: channel_parameters,
7871 funding_transaction,
7874 counterparty_cur_commitment_point,
7875 counterparty_prev_commitment_point,
7876 counterparty_node_id,
7878 counterparty_shutdown_scriptpubkey,
7882 channel_update_status,
7883 closing_signed_in_flight: false,
7887 #[cfg(any(test, fuzzing))]
7888 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7889 #[cfg(any(test, fuzzing))]
7890 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7892 workaround_lnd_bug_4006: None,
7893 sent_message_awaiting_response: None,
7895 latest_inbound_scid_alias,
7896 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7897 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7899 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7900 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7902 #[cfg(any(test, fuzzing))]
7903 historical_inbound_htlc_fulfills,
7905 channel_type: channel_type.unwrap(),
7908 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7917 use bitcoin::blockdata::constants::ChainHash;
7918 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7919 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7920 use bitcoin::blockdata::opcodes;
7921 use bitcoin::network::constants::Network;
7922 use crate::ln::{PaymentHash, PaymentPreimage};
7923 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7924 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7925 use crate::ln::channel::InitFeatures;
7926 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
7927 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7928 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
7929 use crate::ln::msgs;
7930 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7931 use crate::ln::script::ShutdownScript;
7932 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7933 use crate::chain::BestBlock;
7934 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7935 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7936 use crate::chain::transaction::OutPoint;
7937 use crate::routing::router::{Path, RouteHop};
7938 use crate::util::config::UserConfig;
7939 use crate::util::errors::APIError;
7940 use crate::util::ser::{ReadableArgs, Writeable};
7941 use crate::util::test_utils;
7942 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7943 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7944 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7945 use bitcoin::secp256k1::{SecretKey,PublicKey};
7946 use bitcoin::hashes::sha256::Hash as Sha256;
7947 use bitcoin::hashes::Hash;
7948 use bitcoin::hashes::hex::FromHex;
7949 use bitcoin::hash_types::WPubkeyHash;
7950 use bitcoin::blockdata::locktime::absolute::LockTime;
7951 use bitcoin::address::{WitnessProgram, WitnessVersion};
7952 use crate::prelude::*;
7954 struct TestFeeEstimator {
7957 impl FeeEstimator for TestFeeEstimator {
7958 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7964 fn test_max_funding_satoshis_no_wumbo() {
7965 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7966 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7967 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7971 signer: InMemorySigner,
7974 impl EntropySource for Keys {
7975 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7978 impl SignerProvider for Keys {
7979 type EcdsaSigner = InMemorySigner;
7981 type TaprootSigner = InMemorySigner;
7983 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7984 self.signer.channel_keys_id()
7987 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
7991 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
7993 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
7994 let secp_ctx = Secp256k1::signing_only();
7995 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7996 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7997 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8000 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8001 let secp_ctx = Secp256k1::signing_only();
8002 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8003 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8007 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8008 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8009 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
// Opening an outbound channel must fail with `APIError::IncompatibleShutdownScript`
// when the signer hands back an upfront shutdown script the peer cannot accept:
// here a v16 witness program while the peer's features have `shutdown_anysegwit`
// cleared.
fn upfront_shutdown_script_incompatibility() {
let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
&WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Program the keys interface to hand out the non-v0-segwit script on the
// next `get_shutdown_scriptpubkey` call.
keys_provider.expect(OnGetShutdownScriptpubkey {
returns: non_v0_segwit_shutdown_script.clone(),
let secp_ctx = Secp256k1::new();
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
Err(APIError::IncompatibleShutdownScript { script }) => {
// The error must echo back the rejected script.
assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
// Check that, during channel creation, we use the same feerate in the open channel message
// as we do in the Channel object creation itself.
fn test_open_channel_msg_fee() {
let original_fee = 253;
let mut fee_est = TestFeeEstimator{fee_est: original_fee };
let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
// The channel is constructed while the estimator still reports `original_fee`.
let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
fee_est.fee_est = 500;
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
// The message must carry the feerate captured at channel creation, not the
// estimator's current (bumped) value.
assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
// Commitment-fee accounting must apply each side's own dust limit: an HTLC
// below the holder's dust limit adds nothing to the holder's commitment fee,
// while the same HTLC (above the counterparty's lower limit) is charged on
// the counterparty's commitment.
fn test_holder_vs_counterparty_dust_limit() {
// Test that when calculating the local and remote commitment transaction fees, the correct
// dust limits are used.
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let logger = test_utils::TestLogger::new();
let best_block = BestBlock::from_network(network);
// Go through the flow of opening a channel between two nodes, making sure
// they have different dust limits.
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
accept_channel_msg.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Force A's dust limit well above B's 546 sats so one amount can straddle them.
node_a_chan.context.holder_dust_limit_satoshis = 1560;
// Node A --> Node B: funding created
let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
// Put some inbound and outbound HTLCs in A's channel.
let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
amount_msat: htlc_amount_msat,
payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
cltv_expiry: 300000000,
state: InboundHTLCState::Committed,
node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
cltv_expiry: 200000000,
state: OutboundHTLCState::Committed,
source: HTLCSource::OutboundRoute {
path: Path { hops: Vec::new(), blinded_tail: None },
session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
skimmed_fee_msat: None,
blinding_point: None,
// Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
// the dust limit check.
let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
// All HTLCs are dust on A's commitment, so the fee equals the 0-HTLC fee.
let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
// Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
// of the HTLCs are seen to be above the dust limit.
node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
// Verifies the dust threshold uses the correct second-stage tx weight per
// HTLC direction: timeout weight for offered HTLCs, success weight for
// received HTLCs, on both the local and remote commitment calculations.
fn test_timeout_vs_success_htlc_dust_limit() {
// Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
// calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
// *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
// `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
// counted as dust when it shouldn't be.
// Note: the literal 253 below matches the feerate configured for `fee_est` above.
let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Flip perspective: now check the *remote* commitment fee, against the
// counterparty's dust limit.
chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// If swapped: this HTLC would be counted as dust when it shouldn't be.
let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// After a disconnect with no commitment updates in flight, both peers'
// `channel_reestablish` messages should report next_local_commitment_number=1,
// next_remote_commitment_number=0, and an all-zero last per-commitment secret.
fn channel_reestablish_no_updates() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let best_block = BestBlock::from_network(network);
let chain_hash = ChainHash::using_genesis_block(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Go through the flow of opening a channel between two nodes.
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan.accept_inbound_channel();
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// Node A --> Node B: funding created
let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
// Now disconnect the two nodes and check that the commitment point in
// Node B's channel_reestablish message is sane.
assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
let msg = node_b_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Check that the commitment point in Node A's channel_reestablish message
assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
let msg = node_a_chan.get_channel_reestablish(&&logger);
assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Verify that `holder_max_htlc_value_in_flight_msat` is computed from
// `max_inbound_htlc_value_in_flight_percent_of_channel` and clamped to the
// 1%..=100% range, for both outbound and inbound channel creation.
fn test_configured_holder_max_htlc_value_in_flight() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Four configs: two in-range (2%, 99%) and two out-of-range (0%, 101%)
// that should be clamped to the 1%/100% bounds respectively.
let mut config_2_percent = UserConfig::default();
config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
let mut config_99_percent = UserConfig::default();
config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
let mut config_0_percent = UserConfig::default();
config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
let mut config_101_percent = UserConfig::default();
config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
// Test that `OutboundV1Channel::new` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Reuse chan_1's open_channel message for all of the inbound-side checks.
let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
// Test that `InboundV1Channel::new` creates a channel with the correct value for
// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound - 1 (2%) of the `channel_value`.
let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
// Test with the upper bound - 1 of valid values (99%).
let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
// Drives `test_self_and_counterparty_channel_reserve` through reasonable,
// unreasonably-high, below-minimum, and invalid (summing to >= 100%)
// reserve-percentage combinations.
fn test_configured_holder_selected_channel_reserve_satoshis() {
// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
// Test with valid but unreasonably high channel reserves
// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
// Test with calculated channel reserve less than lower bound
// i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
// Test with invalid channel reserves since sum of both is greater than or equal
test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Helper for the reserve tests: opens an outbound channel whose config requests
// `outbound_selected_channel_reserve_perc` of the channel value as the peer's
// reserve, feeds its open_channel to an inbound channel configured with
// `inbound_selected_channel_reserve_perc`, and asserts the negotiated reserves
// (each floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS`). When the two requested
// percentages sum to >= 100%, inbound channel creation must fail instead.
fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut outbound_node_config = UserConfig::default();
// Config takes parts-per-million, so scale the fractional percentage up.
outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
let mut inbound_node_config = UserConfig::default();
inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
// Channel Negotiations failed
let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
assert!(result.is_err());
// Applying a counterparty `channel_update` should store its forwarding
// parameters (and return true); applying the identical update again should
// return false. Our own holder_htlc_minimum_msat must be unaffected.
fn channel_update() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let logger = test_utils::TestLogger::new();
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let best_block = BestBlock::from_network(network);
let chain_hash = ChainHash::using_genesis_block(network);
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
// Make sure A's dust limit is as we expect.
let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
// Node B --> Node A: accept channel, explicitly setting B's dust limit.
let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
accept_channel_msg.dust_limit_satoshis = 546;
node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
node_a_chan.context.holder_dust_limit_satoshis = 1560;
// Node A --> Node B: funding created
let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
// Node B --> Node A: funding signed
let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
// Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
contents: UnsignedChannelUpdate {
short_channel_id: 0,
cltv_expiry_delta: 100,
htlc_minimum_msat: 5,
htlc_maximum_msat: MAX_VALUE_MSAT,
fee_proportional_millionths: 11,
excess_data: Vec::new(),
// Signature content is irrelevant here; the update is applied directly,
// not validated against the gossip network.
signature: Signature::from(unsafe { FFISignature::new() })
// First application of the update returns true (state changed).
assert!(node_a_chan.channel_update(&update).unwrap());
// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
// change our official htlc_minimum_msat.
assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
assert_eq!(info.cltv_expiry_delta, 100);
assert_eq!(info.fee_base_msat, 110);
assert_eq!(info.fee_proportional_millionths, 11);
None => panic!("expected counterparty forwarding info to be Some")
// Re-applying the same update is a no-op and returns false.
assert!(!node_a_chan.channel_update(&update).unwrap());
// Round-trips a `Channel` through serialization and asserts that pending
// outbound HTLCs and holding-cell updates — including their blinding points —
// survive the encode/decode cycle unchanged.
fn blinding_point_ser() {
// Ensure that channel blinding points are (de)serialized properly.
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
let secp_ctx = Secp256k1::new();
let seed = [42; 32];
let network = Network::Testnet;
let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
let features = channelmanager::provided_init_features(&config);
let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
let mut chan = Channel { context: outbound_chan.context };
let dummy_htlc_source = HTLCSource::OutboundRoute {
hops: vec![RouteHop {
pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
cltv_expiry_delta: 0, maybe_announced_channel: false,
session_priv: test_utils::privkey(42),
first_hop_htlc_msat: 0,
payment_id: PaymentId([42; 32]),
let dummy_outbound_output = OutboundHTLCOutput {
payment_hash: PaymentHash([43; 32]),
state: OutboundHTLCState::Committed,
source: dummy_htlc_source.clone(),
skimmed_fee_msat: None,
blinding_point: None,
// Ten pending outbound HTLCs; a subset get distinct blinding points so the
// Some/None mix is exercised by the round-trip below.
let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
payment_hash: PaymentHash([43; 32]),
source: dummy_htlc_source.clone(),
onion_routing_packet: msgs::OnionPacket {
public_key: Ok(test_utils::pubkey(1)),
hop_data: [0; 20*65],
skimmed_fee_msat: None,
blinding_point: None,
let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: PaymentPreimage([42; 32]),
// Holding cell mixes AddHTLC, ClaimHTLC, and AddHTLC-with-blinding-point
// variants across its ten entries.
let mut holding_cell_htlc_updates = Vec::with_capacity(10);
holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
} else if i % 3 == 1 {
holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
let mut dummy_add = dummy_holding_cell_add_htlc.clone();
if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = &mut dummy_add {
*blinding_point = Some(test_utils::pubkey(42 + i));
holding_cell_htlc_updates.push(dummy_add);
chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
// Encode and decode the channel and ensure that the HTLCs within are the same.
let encoded_chan = chan.encode();
let mut s = crate::io::Cursor::new(&encoded_chan);
let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
let features = channelmanager::provided_channel_type_features(&config);
let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8544 #[cfg(feature = "_test_vectors")]
8546 fn outbound_commitment_test() {
8547 use bitcoin::sighash;
8548 use bitcoin::consensus::encode::serialize;
8549 use bitcoin::sighash::EcdsaSighashType;
8550 use bitcoin::hashes::hex::FromHex;
8551 use bitcoin::hash_types::Txid;
8552 use bitcoin::secp256k1::Message;
8553 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8554 use crate::ln::PaymentPreimage;
8555 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8556 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8557 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8558 use crate::util::logger::Logger;
8559 use crate::sync::Arc;
8560 use core::str::FromStr;
8561 use hex::DisplayHex;
8563 // Test vectors from BOLT 3 Appendices C and F (anchors):
8564 let feeest = TestFeeEstimator{fee_est: 15000};
8565 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8566 let secp_ctx = Secp256k1::new();
8568 let mut signer = InMemorySigner::new(
8570 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8571 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8572 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8573 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8574 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8576 // These aren't set in the test vectors:
8577 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8583 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8584 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8585 let keys_provider = Keys { signer: signer.clone() };
8587 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8588 let mut config = UserConfig::default();
8589 config.channel_handshake_config.announced_channel = false;
8590 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8591 chan.context.holder_dust_limit_satoshis = 546;
8592 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8594 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8596 let counterparty_pubkeys = ChannelPublicKeys {
8597 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8598 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8599 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8600 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
8601 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
8603 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8604 CounterpartyChannelTransactionParameters {
8605 pubkeys: counterparty_pubkeys.clone(),
8606 selected_contest_delay: 144
8608 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8609 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8611 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8612 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8614 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8615 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8617 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8618 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8620 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8621 // derived from a commitment_seed, so instead we copy it here and call
8622 // build_commitment_transaction.
8623 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8624 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8625 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8626 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8627 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8629 macro_rules! test_commitment {
8630 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8631 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8632 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8636 macro_rules! test_commitment_with_anchors {
8637 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8638 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8639 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8643 macro_rules! test_commitment_common {
8644 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8645 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8647 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8648 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8650 let htlcs = commitment_stats.htlcs_included.drain(..)
8651 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8653 (commitment_stats.tx, htlcs)
8655 let trusted_tx = commitment_tx.trust();
8656 let unsigned_tx = trusted_tx.built_transaction();
8657 let redeemscript = chan.context.get_funding_redeemscript();
8658 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8659 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8660 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8661 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8663 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8664 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8665 let mut counterparty_htlc_sigs = Vec::new();
8666 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8668 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8669 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8670 counterparty_htlc_sigs.push(remote_signature);
8672 assert_eq!(htlcs.len(), per_htlc.len());
8674 let holder_commitment_tx = HolderCommitmentTransaction::new(
8675 commitment_tx.clone(),
8676 counterparty_signature,
8677 counterparty_htlc_sigs,
8678 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8679 chan.context.counterparty_funding_pubkey()
8681 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8682 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8684 let funding_redeemscript = chan.context.get_funding_redeemscript();
8685 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8686 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8688 // ((htlc, counterparty_sig), (index, holder_sig))
8689 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8692 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8693 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8695 let ref htlc = htlcs[$htlc_idx];
8696 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8697 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8698 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8699 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8700 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8701 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8702 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
8704 let mut preimage: Option<PaymentPreimage> = None;
8707 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
8708 if out == htlc.payment_hash {
8709 preimage = Some(PaymentPreimage([i; 32]));
8713 assert!(preimage.is_some());
8716 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8717 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8718 channel_derivation_parameters: ChannelDerivationParameters {
8719 value_satoshis: chan.context.channel_value_satoshis,
8720 keys_id: chan.context.channel_keys_id,
8721 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8723 commitment_txid: trusted_tx.txid(),
8724 per_commitment_number: trusted_tx.commitment_number(),
8725 per_commitment_point: trusted_tx.per_commitment_point(),
8726 feerate_per_kw: trusted_tx.feerate_per_kw(),
8728 preimage: preimage.clone(),
8729 counterparty_sig: *htlc_counterparty_sig,
8730 }, &secp_ctx).unwrap();
8731 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8732 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8734 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
8735 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8736 let trusted_tx = holder_commitment_tx.trust();
8737 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8738 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
8739 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
8741 assert!(htlc_counterparty_sig_iter.next().is_none());
8745 // anchors: simple commitment tx with no HTLCs and single anchor
8746 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8747 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8748 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8750 // simple commitment tx with no HTLCs
8751 chan.context.value_to_self_msat = 7000000000;
8753 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8754 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8755 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8757 // anchors: simple commitment tx with no HTLCs
8758 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8759 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8760 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8762 chan.context.pending_inbound_htlcs.push({
8763 let mut out = InboundHTLCOutput{
8765 amount_msat: 1000000,
8767 payment_hash: PaymentHash([0; 32]),
8768 state: InboundHTLCState::Committed,
8770 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
8773 chan.context.pending_inbound_htlcs.push({
8774 let mut out = InboundHTLCOutput{
8776 amount_msat: 2000000,
8778 payment_hash: PaymentHash([0; 32]),
8779 state: InboundHTLCState::Committed,
8781 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
8784 chan.context.pending_outbound_htlcs.push({
8785 let mut out = OutboundHTLCOutput{
8787 amount_msat: 2000000,
8789 payment_hash: PaymentHash([0; 32]),
8790 state: OutboundHTLCState::Committed,
8791 source: HTLCSource::dummy(),
8792 skimmed_fee_msat: None,
8793 blinding_point: None,
8795 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
8798 chan.context.pending_outbound_htlcs.push({
8799 let mut out = OutboundHTLCOutput{
8801 amount_msat: 3000000,
8803 payment_hash: PaymentHash([0; 32]),
8804 state: OutboundHTLCState::Committed,
8805 source: HTLCSource::dummy(),
8806 skimmed_fee_msat: None,
8807 blinding_point: None,
8809 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
8812 chan.context.pending_inbound_htlcs.push({
8813 let mut out = InboundHTLCOutput{
8815 amount_msat: 4000000,
8817 payment_hash: PaymentHash([0; 32]),
8818 state: InboundHTLCState::Committed,
8820 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
8824 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8825 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8826 chan.context.feerate_per_kw = 0;
8828 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8829 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8830 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8833 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8834 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8835 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8838 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8839 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8840 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8843 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8844 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8845 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8848 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8849 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8850 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8853 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8854 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8855 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8858 // commitment tx with seven outputs untrimmed (maximum feerate)
8859 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8860 chan.context.feerate_per_kw = 647;
8862 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8863 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8864 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8867 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8868 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8869 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8872 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8873 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8874 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8877 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8878 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8879 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8882 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8883 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8884 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8887 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8888 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8889 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8892 // commitment tx with six outputs untrimmed (minimum feerate)
8893 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8894 chan.context.feerate_per_kw = 648;
8896 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8897 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8898 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8901 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8902 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8903 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8906 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8907 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8908 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8911 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8912 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8913 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8916 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8917 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8918 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8921 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8922 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8923 chan.context.feerate_per_kw = 645;
8924 chan.context.holder_dust_limit_satoshis = 1001;
8926 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8927 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8928 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8931 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8932 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8933 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8936 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8937 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8938 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8941 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8942 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8943 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8946 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8947 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8948 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8951 // commitment tx with six outputs untrimmed (maximum feerate)
8952 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8953 chan.context.feerate_per_kw = 2069;
8954 chan.context.holder_dust_limit_satoshis = 546;
8956 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8957 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8958 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8961 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8962 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8963 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8966 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8967 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8968 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8971 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8972 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8973 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8976 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8977 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8978 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8981 // commitment tx with five outputs untrimmed (minimum feerate)
8982 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8983 chan.context.feerate_per_kw = 2070;
8985 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8986 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8987 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8990 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8991 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8992 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8995 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8996 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8997 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9000 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9001 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9002 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9005 // commitment tx with five outputs untrimmed (maximum feerate)
9006 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9007 chan.context.feerate_per_kw = 2194;
9009 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9010 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9011 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9014 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9015 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9016 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9019 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9020 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9021 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9024 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9025 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9026 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9029 // commitment tx with four outputs untrimmed (minimum feerate)
9030 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9031 chan.context.feerate_per_kw = 2195;
9033 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9034 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9035 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9038 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9039 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9040 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9043 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9044 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9045 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9048 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9049 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9050 chan.context.feerate_per_kw = 2185;
9051 chan.context.holder_dust_limit_satoshis = 2001;
9052 let cached_channel_type = chan.context.channel_type;
9053 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9055 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9056 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9057 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9060 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9061 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9062 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9065 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9066 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9067 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9070 // commitment tx with four outputs untrimmed (maximum feerate)
9071 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9072 chan.context.feerate_per_kw = 3702;
9073 chan.context.holder_dust_limit_satoshis = 546;
9074 chan.context.channel_type = cached_channel_type.clone();
9076 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9077 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9078 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9081 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9082 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9083 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9086 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9087 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9088 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9091 // commitment tx with three outputs untrimmed (minimum feerate)
9092 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9093 chan.context.feerate_per_kw = 3703;
9095 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9096 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9097 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9100 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9101 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9102 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9105 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9106 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9107 chan.context.feerate_per_kw = 3687;
9108 chan.context.holder_dust_limit_satoshis = 3001;
9109 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9111 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9112 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9113 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9116 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9117 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9118 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9121 // commitment tx with three outputs untrimmed (maximum feerate)
9122 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9123 chan.context.feerate_per_kw = 4914;
9124 chan.context.holder_dust_limit_satoshis = 546;
9125 chan.context.channel_type = cached_channel_type.clone();
9127 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9128 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9129 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9132 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9133 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9134 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9137 // commitment tx with two outputs untrimmed (minimum feerate)
9138 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9139 chan.context.feerate_per_kw = 4915;
9140 chan.context.holder_dust_limit_satoshis = 546;
9142 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9143 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9144 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9146 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9147 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9148 chan.context.feerate_per_kw = 4894;
9149 chan.context.holder_dust_limit_satoshis = 4001;
9150 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9152 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9153 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9154 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9156 // commitment tx with two outputs untrimmed (maximum feerate)
9157 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9158 chan.context.feerate_per_kw = 9651180;
9159 chan.context.holder_dust_limit_satoshis = 546;
9160 chan.context.channel_type = cached_channel_type.clone();
9162 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9163 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9164 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9166 // commitment tx with one output untrimmed (minimum feerate)
9167 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9168 chan.context.feerate_per_kw = 9651181;
9170 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9171 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9172 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9174 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9175 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9176 chan.context.feerate_per_kw = 6216010;
9177 chan.context.holder_dust_limit_satoshis = 4001;
9178 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9180 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9181 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9182 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9184 // commitment tx with fee greater than funder amount
9185 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9186 chan.context.feerate_per_kw = 9651936;
9187 chan.context.holder_dust_limit_satoshis = 546;
9188 chan.context.channel_type = cached_channel_type;
9190 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9191 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9192 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9194 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9195 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9196 chan.context.feerate_per_kw = 253;
9197 chan.context.pending_inbound_htlcs.clear();
9198 chan.context.pending_inbound_htlcs.push({
9199 let mut out = InboundHTLCOutput{
9201 amount_msat: 2000000,
9203 payment_hash: PaymentHash([0; 32]),
9204 state: InboundHTLCState::Committed,
9206 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9209 chan.context.pending_outbound_htlcs.clear();
9210 chan.context.pending_outbound_htlcs.push({
9211 let mut out = OutboundHTLCOutput{
9213 amount_msat: 5000001,
9215 payment_hash: PaymentHash([0; 32]),
9216 state: OutboundHTLCState::Committed,
9217 source: HTLCSource::dummy(),
9218 skimmed_fee_msat: None,
9219 blinding_point: None,
9221 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9224 chan.context.pending_outbound_htlcs.push({
9225 let mut out = OutboundHTLCOutput{
9227 amount_msat: 5000000,
9229 payment_hash: PaymentHash([0; 32]),
9230 state: OutboundHTLCState::Committed,
9231 source: HTLCSource::dummy(),
9232 skimmed_fee_msat: None,
9233 blinding_point: None,
9235 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9239 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9240 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9241 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9244 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9245 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9246 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9248 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9249 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9250 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9252 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9253 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9254 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9257 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9258 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9259 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9260 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9263 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9264 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9265 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9267 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9268 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9269 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9271 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9272 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9273 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9278 fn test_per_commitment_secret_gen() {
9279 // Test vectors from BOLT 3 Appendix D:
// Case: all-zero seed, first (highest) index 2^48 - 1 = 281474976710655.
9281 let mut seed = [0; 32];
9282 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9283 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9284 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// Case: all-ones seed, same highest index.
9286 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9287 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9288 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Case: all-ones seed with alternating-bit indexes, exercising the per-bit
// hash-and-flip derivation in build_commitment_secret.
9290 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9291 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9293 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9294 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Case: 0x01-repeated seed at index 1 (lowest non-zero index vector).
9296 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9297 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9298 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9302 fn test_key_derivation() {
9303 // Test vectors from BOLT 3 Appendix E:
9304 let secp_ctx = Secp256k1::new();
// Fixed secrets from the spec: a basepoint secret and a per-commitment secret.
9306 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9307 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// Sanity-check the public points corresponding to the two secrets above.
9309 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9310 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9312 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9313 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// localprivkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
// per BOLT 3; checked against the spec's expected scalar.
9315 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9316 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// revocationpubkey derivation from the revocation basepoint and the
// per-commitment point.
9318 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9319 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// revocationprivkey derivation, only computable with both secrets.
9321 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9322 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
// Tests that an inbound open_channel whose explicit channel_type requests
// zero-conf (on top of static_remote_key) is accepted by InboundV1Channel::new
// even when the caller does not pre-authorize 0conf (is_0conf=false).
9326 fn test_zero_conf_channel_type_support() {
9327 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9328 let secp_ctx = Secp256k1::new();
9329 let seed = [42; 32];
9330 let network = Network::Testnet;
9331 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9332 let logger = test_utils::TestLogger::new();
// Node A builds an ordinary outbound channel towards node B.
9334 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9335 let config = UserConfig::default();
9336 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9337 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Override the open_channel message's channel_type to request zero-conf.
9339 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9340 channel_type_features.set_zero_conf_required();
9342 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9343 open_channel_msg.channel_type = Some(channel_type_features);
9344 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// The inbound side must not reject the zero-conf channel_type outright.
9345 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9346 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9347 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9348 assert!(res.is_ok());
9352 fn test_supports_anchors_zero_htlc_tx_fee() {
9353 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9354 // resulting `channel_type`.
9355 let secp_ctx = Secp256k1::new();
9356 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9357 let network = Network::Testnet;
9358 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9359 let logger = test_utils::TestLogger::new();
9361 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9362 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Only the anchors knob is flipped on top of the default config.
9364 let mut config = UserConfig::default();
9365 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9367 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9368 // need to signal it.
// Counterparty features come from a default config (no anchors), so the
// negotiated type must NOT include anchors.
9369 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9370 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9371 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9372 &config, 0, 42, None
9374 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
// Expected result when both sides signal anchors: static_remote_key +
// anchors_zero_fee_htlc_tx, both as *required* bits.
9376 let mut expected_channel_type = ChannelTypeFeatures::empty();
9377 expected_channel_type.set_static_remote_key_required();
9378 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
// Now both sides use the anchors-enabled config.
9380 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9381 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9382 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9386 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9387 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9388 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9389 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9390 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Both ends of the handshake must have settled on the anchors channel type.
9393 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9394 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9398 fn test_rejects_implicit_simple_anchors() {
9399 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9400 // each side's `InitFeatures`, it is rejected.
9401 let secp_ctx = Secp256k1::new();
9402 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9403 let network = Network::Testnet;
9404 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9405 let logger = test_utils::TestLogger::new();
9407 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9408 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9410 let config = UserConfig::default();
9412 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Build raw InitFeatures signalling static_remote_key (bit 12) plus the
// legacy `option_anchors` (bit 20) — the "simple anchors" LDK doesn't support.
9413 let static_remote_key_required: u64 = 1 << 12;
9414 let simple_anchors_required: u64 = 1 << 20;
9415 let raw_init_features = static_remote_key_required | simple_anchors_required;
9416 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9418 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9419 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9420 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9424 // Set `channel_type` to `None` to force the implicit feature negotiation.
9425 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9426 open_channel_msg.channel_type = None;
9428 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9429 // `static_remote_key`, it will fail the channel.
9430 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9431 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9432 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9433 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9435 assert!(channel_b.is_err());
9439 fn test_rejects_simple_anchors_channel_type() {
9440 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9442 let secp_ctx = Secp256k1::new();
9443 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9444 let network = Network::Testnet;
9445 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9446 let logger = test_utils::TestLogger::new();
9448 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9449 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9451 let config = UserConfig::default();
9453 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Construct features/channel_type carrying static_remote_key (bit 12) and the
// legacy `option_anchors` (bit 20). LDK knows these bits (they are not
// "unknown"), it simply refuses the legacy anchors channel type.
9454 let static_remote_key_required: u64 = 1 << 12;
9455 let simple_anchors_required: u64 = 1 << 20;
9456 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9457 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9458 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9459 assert!(!simple_anchors_init.requires_unknown_bits());
9460 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9462 // First, we'll try to open a channel between A and B where A requests a channel type for
9463 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9464 // B as it's not supported by LDK.
9465 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9466 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9467 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
// Forge the open_channel to explicitly request the legacy anchors type.
9471 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9472 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9474 let res = InboundV1Channel::<&TestKeysInterface>::new(
9475 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9476 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9477 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9479 assert!(res.is_err());
9481 // Then, we'll try to open another channel where A requests a channel type for
9482 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9483 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9485 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9486 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9487 10000000, 100000, 42, &config, 0, 42, None
9490 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9492 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9493 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9494 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9495 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// B (playing the attacker) swaps the agreed type for the legacy anchors type
// in its accept_channel; A must reject the downgrade.
9498 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9499 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9501 let res = channel_a.accept_channel(
9502 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9504 assert!(res.is_err());
// Tests that a channel funded as part of a batch holds back channel_ready and
// the funding broadcast (ChannelState::WaitingForBatch) until set_batch_ready()
// is called, even when trust_own_funding_0conf would otherwise allow 0conf.
9508 fn test_waiting_for_batch() {
9509 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9510 let logger = test_utils::TestLogger::new();
9511 let secp_ctx = Secp256k1::new();
9512 let seed = [42; 32];
9513 let network = Network::Testnet;
9514 let best_block = BestBlock::from_network(network);
9515 let chain_hash = ChainHash::using_genesis_block(network);
9516 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9518 let mut config = UserConfig::default();
9519 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9520 // channel in a batch before all channels are ready.
9521 config.channel_handshake_limits.trust_own_funding_0conf = true;
9523 // Create a channel from node a to node b that will be part of batch funding.
9524 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9525 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9530 &channelmanager::provided_init_features(&config),
// Node B receives the open_channel and builds the inbound side.
9540 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9541 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9542 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9547 &channelmanager::provided_channel_type_features(&config),
9548 &channelmanager::provided_init_features(&config),
9554 true, // Allow node b to send a 0conf channel_ready.
// Complete the accept_channel leg of the handshake.
9557 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9558 node_a_chan.accept_channel(
9559 &accept_channel_msg,
9560 &config.channel_handshake_limits,
9561 &channelmanager::provided_init_features(&config),
9564 // Fund the channel with a batch funding transaction.
// Two equal-value outputs: ours at index 0 plus a second (other batch
// channel's) output, making this a multi-channel funding tx.
9565 let output_script = node_a_chan.context.get_funding_redeemscript();
9566 let tx = Transaction {
9568 lock_time: LockTime::ZERO,
9572 value: 10000000, script_pubkey: output_script.clone(),
9575 value: 10000000, script_pubkey: Builder::new().into_script(),
9578 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
// Exchange funding_created / funding_signed for the batch funding outpoint.
9579 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9584 ).map_err(|_| ()).unwrap();
9585 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9586 &funding_created_msg.unwrap(),
9590 ).map_err(|_| ()).unwrap();
9591 let node_b_updates = node_b_chan.monitor_updating_restored(
9599 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9600 // broadcasting the funding transaction until the batch is ready.
9601 let _ = node_a_chan.funding_signed(
9602 &funding_signed_msg.unwrap(),
9607 let node_a_updates = node_a_chan.monitor_updating_restored(
9614 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9615 // as the funding transaction depends on all channels in the batch becoming ready.
9616 assert!(node_a_updates.channel_ready.is_none());
9617 assert!(node_a_updates.funding_broadcastable.is_none());
9619 node_a_chan.context.channel_state,
9620 ChannelState::FundingSent as u32 |
9621 ChannelState::WaitingForBatch as u32,
9624 // It is possible to receive a 0conf channel_ready from the remote node.
9625 node_a_chan.channel_ready(
9626 &node_b_updates.channel_ready.unwrap(),
// Remote readiness is recorded alongside the batch-wait flag; both coexist.
9634 node_a_chan.context.channel_state,
9635 ChannelState::FundingSent as u32 |
9636 ChannelState::WaitingForBatch as u32 |
9637 ChannelState::TheirChannelReady as u32,
9640 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9641 node_a_chan.set_batch_ready();
9643 node_a_chan.context.channel_state,
9644 ChannelState::FundingSent as u32 |
9645 ChannelState::TheirChannelReady as u32,
// With the batch flag cleared, the channel may finally emit channel_ready.
9647 assert!(node_a_chan.check_get_channel_ready(0).is_some());