1 // This file is Copyright its original authors, visible in version control
2 // history.
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
71 }
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
84 }
86 #[derive(Debug, Clone, Copy, PartialEq)]
87 enum FeeUpdateState {
88 // Inbound states mirroring InboundHTLCState
89 RemoteAnnounced,
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
98 Outbound,
99 }
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
105 }
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
147 Committed,
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
150 /// we'll drop it.
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
159 }
161 struct InboundHTLCOutput {
162 htlc_id: u64,
163 amount_msat: u64,
164 cltv_expiry: u32,
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
167 }
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
184 Committed,
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
200 }
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
208 }
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
212 match o {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
215 }
216 }
217 }
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
221 match self {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
224 }
225 }
226 }
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
230 htlc_id: u64,
231 amount_msat: u64,
232 cltv_expiry: u32,
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
235 source: HTLCSource,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
238 }
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
244 // always outbound
245 amount_msat: u64,
246 cltv_expiry: u32,
247 payment_hash: PaymentHash,
248 source: HTLCSource,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
253 },
254 ClaimHTLC {
255 payment_preimage: PaymentPreimage,
256 htlc_id: u64,
257 },
258 FailHTLC {
259 htlc_id: u64,
260 err_packet: msgs::OnionErrorPacket,
261 },
262 }
264 /// There are a few "states" and then a number of flags which can be applied:
265 /// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
266 /// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
267 /// move on to `ChannelReady`.
268 /// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
269 /// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
270 /// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
271 enum ChannelState {
272 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
273 OurInitSent = 1 << 0,
274 /// Implies we have received their `open_channel`/`accept_channel` message
275 TheirInitSent = 1 << 1,
276 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
277 /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
278 /// upon receipt of `funding_created`, so simply skip this state.
279 FundingCreated = 4,
280 /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
281 /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
282 /// and our counterparty consider the funding transaction confirmed.
283 FundingSent = 8,
284 /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
285 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
286 TheirChannelReady = 1 << 4,
287 /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
288 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
289 OurChannelReady = 1 << 5,
290 ChannelReady = 64,
291 /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
292 /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
294 PeerDisconnected = 1 << 7,
295 /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
296 /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
297 /// sending any outbound messages until they've managed to finish.
298 MonitorUpdateInProgress = 1 << 8,
299 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
300 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
301 /// messages as then we will be unable to determine which HTLCs they included in their
302 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
303 /// later.
304 /// Flag is set on `ChannelReady`.
305 AwaitingRemoteRevoke = 1 << 9,
306 /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
307 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
308 /// to respond with our own shutdown message when possible.
309 RemoteShutdownSent = 1 << 10,
310 /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
311 /// point, we may not add any new HTLCs to the channel.
312 LocalShutdownSent = 1 << 11,
313 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
314 /// to drop us, but we store this anyway.
315 ShutdownComplete = 4096,
316 /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
317 /// broadcasting of the funding transaction is being held until all channels in the batch
318 /// have received funding_signed and have their monitors persisted.
319 WaitingForBatch = 1 << 13,
320 }
321 const BOTH_SIDES_SHUTDOWN_MASK: u32 =
322 ChannelState::LocalShutdownSent as u32 |
323 ChannelState::RemoteShutdownSent as u32;
324 const MULTI_STATE_FLAGS: u32 =
325 BOTH_SIDES_SHUTDOWN_MASK |
326 ChannelState::PeerDisconnected as u32 |
327 ChannelState::MonitorUpdateInProgress as u32;
328 const STATE_FLAGS: u32 =
329 MULTI_STATE_FLAGS |
330 ChannelState::TheirChannelReady as u32 |
331 ChannelState::OurChannelReady as u32 |
332 ChannelState::AwaitingRemoteRevoke as u32 |
333 ChannelState::WaitingForBatch as u32;
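// Editorial sketch, not part of the original file: `channel_state` packs a base state plus
// the flag bits above into a single `u32`. The hypothetical helper below shows how the masks
// compose — strip `STATE_FLAGS` to recover the base state, then test flag groups directly.
#[cfg(test)]
fn example_state_mask_usage(channel_state: u32) -> bool {
// Base state with all transient flag bits removed.
let base_state = channel_state & !STATE_FLAGS;
// "Quiescent ChannelReady": base state is `ChannelReady` and none of the
// disconnect/monitor-update/shutdown flags are outstanding.
base_state == ChannelState::ChannelReady as u32 && channel_state & MULTI_STATE_FLAGS == 0
}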
335 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
337 pub const DEFAULT_MAX_HTLCS: u16 = 50;
339 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
340 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
341 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
342 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
343 }
345 #[cfg(not(test))]
346 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
347 #[cfg(test)]
348 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
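// Editorial sketch (not in the original file): the commitment transaction fee implied by the
// constants above at a given feerate (sats per 1000 weight units) and non-dust HTLC count,
// i.e. weight = base + 172 per HTLC output. The helper name is hypothetical.
#[cfg(test)]
fn example_commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
let weight = commitment_tx_base_weight(channel_type_features)
+ num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
feerate_per_kw as u64 * weight / 1000
}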
350 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
352 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
353 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
354 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
355 /// `holder_max_htlc_value_in_flight_msat`.
356 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
358 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
359 /// `option_support_large_channel` (aka wumbo channels) is not supported.
361 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
363 /// Total bitcoin supply in satoshis.
364 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
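// Editorial sketch (not in the original file): how the two bounds above apply to a proposed
// `funding_satoshis`. Without `option_support_large_channel` (wumbo) support, funding is
// further capped at 2^24 - 1 sats. The helper name is hypothetical.
#[cfg(test)]
fn example_funding_amount_in_bounds(funding_satoshis: u64, peer_supports_wumbo: bool) -> bool {
funding_satoshis < TOTAL_BITCOIN_SUPPLY_SATOSHIS
&& (peer_supports_wumbo || funding_satoshis <= MAX_FUNDING_SATOSHIS_NO_WUMBO)
}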
366 /// The maximum network dust limit for standard script formats. This currently represents the
367 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
368 /// transaction non-standard and thus refuses to relay it.
369 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
370 /// implementations use this value for their dust limit today.
371 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
373 /// The maximum channel dust limit we will accept from our counterparty.
374 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
376 /// The dust limit is used for both the commitment transaction outputs as well as the closing
377 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
378 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
379 /// In order to avoid having to concern ourselves with standardness during the closing process, we
380 /// simply require our counterparty to use a dust limit which will leave any segwit output
381 /// standard.
382 /// See <https://github.com/lightning/bolts/issues/905> for more details.
383 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
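// Editorial sketch (not in the original file): a handshake-time sanity check on a
// counterparty-provided `dust_limit_satoshis`, bounded by the two constants above. The helper
// name is hypothetical.
#[cfg(test)]
fn example_counterparty_dust_limit_acceptable(dust_limit_satoshis: u64) -> bool {
dust_limit_satoshis >= MIN_CHAN_DUST_LIMIT_SATOSHIS
&& dust_limit_satoshis <= MAX_CHAN_DUST_LIMIT_SATOSHIS
}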
385 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
386 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
388 /// Used to return a simple Error back to ChannelManager. Will get converted to a
389 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
390 /// channel_id in ChannelManager.
391 pub(super) enum ChannelError {
392 Ignore(String),
393 Warn(String),
394 Close(String),
395 }
397 impl fmt::Debug for ChannelError {
398 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
399 match self {
400 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
401 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
402 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
403 }
404 }
405 }
407 impl fmt::Display for ChannelError {
408 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
409 match self {
410 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
411 &ChannelError::Warn(ref e) => write!(f, "{}", e),
412 &ChannelError::Close(ref e) => write!(f, "{}", e),
413 }
414 }
415 }
417 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
418 pub logger: &'a L,
419 pub peer_id: Option<PublicKey>,
420 pub channel_id: Option<ChannelId>,
421 }
423 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
424 fn log(&self, mut record: Record) {
425 record.peer_id = self.peer_id;
426 record.channel_id = self.channel_id;
427 self.logger.log(record)
428 }
429 }
431 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
432 where L::Target: Logger {
433 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
434 where S::Target: SignerProvider
435 {
436 WithChannelContext {
437 logger,
438 peer_id: Some(context.counterparty_node_id),
439 channel_id: Some(context.channel_id),
440 }
441 }
442 }
444 macro_rules! secp_check {
445 ($res: expr, $err: expr) => {
446 match $res {
447 Ok(thing) => thing,
448 Err(_) => return Err(ChannelError::Close($err)),
449 }
450 };
451 }
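// Editorial usage note: `secp_check!` evaluates a secp256k1 `Result`, yielding the `Ok` value
// or early-returning `Err(ChannelError::Close(..))` with the given message, e.g.
// (illustrative only):
//   secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &their_sig, &their_funding_key), "Invalid signature".to_owned());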
453 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
454 /// our counterparty or not. However, we don't want to announce updates right away to avoid
455 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
456 /// our channel_update message and track the current state here.
457 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
458 #[derive(Clone, Copy, PartialEq)]
459 pub(super) enum ChannelUpdateStatus {
460 /// We've announced the channel as enabled and are connected to our peer.
461 Enabled,
462 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
463 DisabledStaged(u8),
464 /// Our channel is live again, but we haven't announced the channel as enabled yet.
465 EnabledStaged(u8),
466 /// We've announced the channel as disabled.
467 Disabled,
468 }
470 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
472 pub enum AnnouncementSigsState {
473 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
474 /// we sent the last `AnnouncementSignatures`.
475 NotSent,
476 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
477 /// This state never appears on disk - instead we write `NotSent`.
478 MessageSent,
479 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
480 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
481 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
482 /// they send back a `RevokeAndACK`.
483 /// This state never appears on disk - instead we write `NotSent`.
484 Committed,
485 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
486 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
487 PeerReceived,
488 }
490 /// An enum indicating whether the local or remote side offered a given HTLC.
491 enum HTLCInitiator {
492 LocalOffered,
493 RemoteOffered,
494 }
496 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
497 struct HTLCStats {
498 pending_htlcs: u32,
499 pending_htlcs_value_msat: u64,
500 on_counterparty_tx_dust_exposure_msat: u64,
501 on_holder_tx_dust_exposure_msat: u64,
502 holding_cell_msat: u64,
503 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
504 }
506 /// An enum gathering stats on commitment transaction, either local or remote.
507 struct CommitmentStats<'a> {
508 tx: CommitmentTransaction, // the transaction info
509 feerate_per_kw: u32, // the feerate included to build the transaction
510 total_fee_sat: u64, // the total fee included in the transaction
511 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
512 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
513 local_balance_msat: u64, // local balance before fees but considering dust limits
514 remote_balance_msat: u64, // remote balance before fees but considering dust limits
515 preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
516 }
518 /// Used when calculating whether we or the remote can afford an additional HTLC.
519 struct HTLCCandidate {
520 amount_msat: u64,
521 origin: HTLCInitiator,
522 }
524 impl HTLCCandidate {
525 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
526 Self {
527 amount_msat,
528 origin,
529 }
530 }
531 }
533 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
534 /// description
535 enum UpdateFulfillFetch {
536 NewClaim {
537 monitor_update: ChannelMonitorUpdate,
538 htlc_value_msat: u64,
539 msg: Option<msgs::UpdateFulfillHTLC>,
540 },
541 DuplicateClaim {},
542 }
544 /// The return type of get_update_fulfill_htlc_and_commit.
545 pub enum UpdateFulfillCommitFetch {
546 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
547 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
548 /// previously placed in the holding cell (and has since been removed).
549 NewClaim {
550 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
551 monitor_update: ChannelMonitorUpdate,
552 /// The value of the HTLC which was claimed, in msat.
553 htlc_value_msat: u64,
554 },
555 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
556 /// or has been forgotten (presumably previously claimed).
557 DuplicateClaim {},
558 }
560 /// The return value of `monitor_updating_restored`
561 pub(super) struct MonitorRestoreUpdates {
562 pub raa: Option<msgs::RevokeAndACK>,
563 pub commitment_update: Option<msgs::CommitmentUpdate>,
564 pub order: RAACommitmentOrder,
565 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
566 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
567 pub finalized_claimed_htlcs: Vec<HTLCSource>,
568 pub funding_broadcastable: Option<Transaction>,
569 pub channel_ready: Option<msgs::ChannelReady>,
570 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
571 }
573 /// The return value of `signer_maybe_unblocked`
575 pub(super) struct SignerResumeUpdates {
576 pub commitment_update: Option<msgs::CommitmentUpdate>,
577 pub funding_signed: Option<msgs::FundingSigned>,
578 pub funding_created: Option<msgs::FundingCreated>,
579 pub channel_ready: Option<msgs::ChannelReady>,
580 }
582 /// The return value of `channel_reestablish`
583 pub(super) struct ReestablishResponses {
584 pub channel_ready: Option<msgs::ChannelReady>,
585 pub raa: Option<msgs::RevokeAndACK>,
586 pub commitment_update: Option<msgs::CommitmentUpdate>,
587 pub order: RAACommitmentOrder,
588 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
589 pub shutdown_msg: Option<msgs::Shutdown>,
590 }
592 /// The result of a shutdown that should be handled.
594 pub(crate) struct ShutdownResult {
595 /// A channel monitor update to apply.
596 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
597 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
598 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
599 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
600 /// propagated to the remainder of the batch.
601 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
602 }
604 /// If the majority of the channels funds are to the fundee and the initiator holds only just
605 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
606 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
607 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
608 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
609 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
610 /// by this multiple without hitting this case, before sending.
611 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
612 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
613 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
614 /// leave the channel less usable as we hold a bigger reserve.
615 #[cfg(any(fuzzing, test))]
616 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
617 #[cfg(not(any(fuzzing, test)))]
618 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
620 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
621 /// channel creation on an inbound channel, we simply force-close and move on.
622 /// This constant is the one suggested in BOLT 2.
623 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
625 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
626 /// not have enough balance value remaining to cover the onchain cost of this new
627 /// HTLC weight. If this happens, our counterparty fails the reception of our
628 /// commitment_signed including this new HTLC due to infringement on the channel
629 /// reserve.
630 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
631 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
632 /// leads to a channel force-close. Ultimately, this is an issue coming from the
633 /// design of LN state machines, allowing asynchronous updates.
634 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
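// Editorial sketch (not in the original file): combining the two buffers above, an
// affordability check for a new outbound HTLC prices the commitment transaction at a feerate
// bumped by `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` with `CONCURRENT_INBOUND_HTLC_FEE_BUFFER`
// extra HTLC slots reserved. The helper name is hypothetical.
#[cfg(test)]
fn example_buffered_commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
let buffered_feerate = feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
let buffered_htlcs = num_htlcs as u64 + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64;
let weight = commitment_tx_base_weight(channel_type_features)
+ buffered_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
buffered_feerate * weight / 1000
}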
636 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
637 /// commitment transaction fees, with at least this many HTLCs present on the commitment
638 /// transaction (not counting the value of the HTLCs themselves).
639 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
641 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
642 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
643 /// ChannelUpdate prompted by the config update. This value was determined as follows:
645 /// * The expected interval between ticks (1 minute).
646 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
647 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
648 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
649 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
651 /// The number of ticks that may elapse while we're waiting for a response to a
652 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
653 /// them.
655 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
656 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
658 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
659 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
660 /// exceeding this age limit will be force-closed and purged from memory.
661 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
663 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
664 pub(crate) const COINBASE_MATURITY: u32 = 100;
666 struct PendingChannelMonitorUpdate {
667 update: ChannelMonitorUpdate,
668 }
670 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
671 (0, update, required),
672 });
674 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
675 /// its variants containing an appropriate channel struct.
676 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
677 UnfundedOutboundV1(OutboundV1Channel<SP>),
678 UnfundedInboundV1(InboundV1Channel<SP>),
679 Funded(Channel<SP>),
680 }
682 impl<'a, SP: Deref> ChannelPhase<SP> where
683 SP::Target: SignerProvider,
684 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
685 {
686 pub fn context(&'a self) -> &'a ChannelContext<SP> {
687 match self {
688 ChannelPhase::Funded(chan) => &chan.context,
689 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
690 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
691 }
692 }
694 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
695 match self {
696 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
697 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
698 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
699 }
700 }
701 }
703 /// Contains all state common to unfunded inbound/outbound channels.
704 pub(super) struct UnfundedChannelContext {
705 /// A counter tracking how many ticks have elapsed since this unfunded channel was
706 /// created. Once this unfunded channel reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be
707 /// force-closed and purged from memory.
709 /// This is so that we don't keep channels around that haven't progressed to a funded state
710 /// in a timely manner.
711 unfunded_channel_age_ticks: usize,
712 }
714 impl UnfundedChannelContext {
715 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
716 /// having reached the unfunded channel age limit.
718 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
719 pub fn should_expire_unfunded_channel(&mut self) -> bool {
720 self.unfunded_channel_age_ticks += 1;
721 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
722 }
723 }
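// Editorial note: `should_expire_unfunded_channel` advances the tick counter as a side effect,
// so callers invoke it exactly once per timer tick, e.g. (illustrative only):
//   if unfunded_context.should_expire_unfunded_channel() { /* force-close and purge */ }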
725 /// Contains everything about the channel including state, and various flags.
726 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
727 config: LegacyChannelConfig,
729 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
730 // constructed using it. The second element in the tuple corresponds to the number of ticks that
731 // have elapsed since the update occurred.
732 prev_config: Option<(ChannelConfig, usize)>,
734 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
738 /// The current channel ID.
739 channel_id: ChannelId,
740 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
741 /// Will be `None` for channels created prior to 0.0.115.
742 temporary_channel_id: Option<ChannelId>,
745 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
746 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
747 // next connect.
748 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
749 // Note that a number of our tests were written prior to the behavior here which retransmits
750 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
751 // tests.
752 #[cfg(any(test, feature = "_test_utils"))]
753 pub(crate) announcement_sigs_state: AnnouncementSigsState,
754 #[cfg(not(any(test, feature = "_test_utils")))]
755 announcement_sigs_state: AnnouncementSigsState,
757 secp_ctx: Secp256k1<secp256k1::All>,
758 channel_value_satoshis: u64,
760 latest_monitor_update_id: u64,
762 holder_signer: ChannelSignerType<SP>,
763 shutdown_scriptpubkey: Option<ShutdownScript>,
764 destination_script: ScriptBuf,
766 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
767 // generation start at 0 and count up...this simplifies some parts of implementation at the
768 // cost of others, but should really just be changed.
770 cur_holder_commitment_transaction_number: u64,
771 cur_counterparty_commitment_transaction_number: u64,
772 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
773 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
774 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
775 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
777 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
778 /// need to ensure we resend them in the order we originally generated them. Note that because
779 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
780 /// sufficient to simply set this to the opposite of any message we are generating as we
781 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
782 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
783 /// send it first.
784 resend_order: RAACommitmentOrder,
786 monitor_pending_channel_ready: bool,
787 monitor_pending_revoke_and_ack: bool,
788 monitor_pending_commitment_signed: bool,
790 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
791 // responsible for some of the HTLCs here or not - we don't know whether the update in question
792 // completed or not. We currently ignore these fields entirely when force-closing a channel,
793 // but need to handle this somehow or we run the risk of losing HTLCs!
794 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
795 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
796 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
798 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
799 /// but our signer (initially) refused to give us a signature, we should retry at some point in
800 /// the future when the signer indicates it may have a signature for us.
802 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
803 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
804 signer_pending_commitment_update: bool,
805 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
806 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
807 /// outbound or inbound.
808 signer_pending_funding: bool,
810 // pending_update_fee is filled when sending and receiving update_fee.
812 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
813 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
814 // generating new commitment transactions with exactly the same criteria as inbound/outbound
815 // HTLCs with similar state.
816 pending_update_fee: Option<(u32, FeeUpdateState)>,
817 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
818 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
819 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
820 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
821 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
822 holding_cell_update_fee: Option<u32>,
823 next_holder_htlc_id: u64,
824 next_counterparty_htlc_id: u64,
827 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
828 /// when the channel is updated in ways which may impact the `channel_update` message or when a
829 /// new block is received, ensuring it's always at least moderately close to the current real
830 /// time.
831 update_time_counter: u32,
833 #[cfg(debug_assertions)]
834 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
835 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
836 #[cfg(debug_assertions)]
837 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
838 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
840 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
841 target_closing_feerate_sats_per_kw: Option<u32>,
843 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
844 /// update, we need to delay processing it until later. We do that here by simply storing the
845 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
846 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
848 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
849 /// transaction. These are set once we reach `closing_negotiation_ready`.
850 #[cfg(test)]
851 pub(crate) closing_fee_limits: Option<(u64, u64)>,
852 #[cfg(not(test))]
853 closing_fee_limits: Option<(u64, u64)>,
855 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
856 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
857 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
858 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
859 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
861 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
862 /// until we see a `commitment_signed` before doing so.
864 /// We don't bother to persist this - we anticipate this state won't last longer than a few
865 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
866 expecting_peer_commitment_signed: bool,
868 /// The hash of the block in which the funding transaction was included.
869 funding_tx_confirmed_in: Option<BlockHash>,
870 funding_tx_confirmation_height: u32,
871 short_channel_id: Option<u64>,
872 /// Either the height at which this channel was created or the height at which it was last
873 /// serialized if it was serialized by versions prior to 0.0.103.
874 /// We use this to close if funding is never broadcasted.
875 channel_creation_height: u32,
877 counterparty_dust_limit_satoshis: u64,
879 #[cfg(test)]
880 pub(super) holder_dust_limit_satoshis: u64,
881 #[cfg(not(test))]
882 holder_dust_limit_satoshis: u64,
884 #[cfg(test)]
885 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
886 #[cfg(not(test))]
887 counterparty_max_htlc_value_in_flight_msat: u64,
889 #[cfg(test)]
890 pub(super) holder_max_htlc_value_in_flight_msat: u64,
891 #[cfg(not(test))]
892 holder_max_htlc_value_in_flight_msat: u64,
894 /// minimum channel reserve for self to maintain - set by them.
895 counterparty_selected_channel_reserve_satoshis: Option<u64>,
897 #[cfg(test)]
898 pub(super) holder_selected_channel_reserve_satoshis: u64,
899 #[cfg(not(test))]
900 holder_selected_channel_reserve_satoshis: u64,
902 counterparty_htlc_minimum_msat: u64,
903 holder_htlc_minimum_msat: u64,
904 #[cfg(test)]
905 pub counterparty_max_accepted_htlcs: u16,
906 #[cfg(not(test))]
907 counterparty_max_accepted_htlcs: u16,
908 holder_max_accepted_htlcs: u16,
909 minimum_depth: Option<u32>,
911 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
913 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
914 funding_transaction: Option<Transaction>,
915 is_batch_funding: Option<()>,
917 counterparty_cur_commitment_point: Option<PublicKey>,
918 counterparty_prev_commitment_point: Option<PublicKey>,
919 counterparty_node_id: PublicKey,
921 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
923 commitment_secrets: CounterpartyCommitmentSecrets,
925 channel_update_status: ChannelUpdateStatus,
926 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
927 /// not complete within a single timer tick (one minute), we should force-close the channel.
928 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
929 /// to DoS us.
930 /// Note that this field is reset to false on deserialization to give us a chance to connect to
931 /// our peer and start the closing_signed negotiation fresh.
932 closing_signed_in_flight: bool,
934 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
935 /// This can be used to rebroadcast the channel_announcement message later.
936 announcement_sigs: Option<(Signature, Signature)>,
938 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
939 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
940 // be, by comparing the cached values to the fee of the transaction generated by
941 // `build_commitment_transaction`.
942 #[cfg(any(test, fuzzing))]
943 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
944 #[cfg(any(test, fuzzing))]
945 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
947 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
948 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
949 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
950 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
951 /// message until we receive a channel_reestablish.
953 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
954 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
956 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
957 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
958 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
959 /// unblock the state machine.
961 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
962 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
963 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
965 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
966 /// [`msgs::RevokeAndACK`] message from the counterparty.
967 sent_message_awaiting_response: Option<usize>,
969 #[cfg(any(test, fuzzing))]
970 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
971 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
972 // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
973 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
974 // is fine, but as a sanity check in our failure to generate the second claim, we check here
975 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
976 historical_inbound_htlc_fulfills: HashSet<u64>,
978 /// This channel's type, as negotiated during channel open
979 channel_type: ChannelTypeFeatures,
981 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
982 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
983 // the channel's funding UTXO.
985 // We also use this when sending our peer a channel_update that isn't to be broadcasted
986 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
987 // associated channel mapping.
989 // We only bother storing the most recent SCID alias at any time, though our counterparty has
990 // to store all of them.
991 latest_inbound_scid_alias: Option<u64>,
993 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
994 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
995 // don't currently support node id aliases and eventually privacy should be provided with
996 // blinded paths instead of simple scid+node_id aliases.
997 outbound_scid_alias: u64,
999 // We track whether we already emitted a `ChannelPending` event.
1000 channel_pending_event_emitted: bool,
1002 // We track whether we already emitted a `ChannelReady` event.
1003 channel_ready_event_emitted: bool,
1005 /// The unique identifier used to re-derive the private key material for the channel through
1006 /// [`SignerProvider::derive_channel_signer`].
1007 channel_keys_id: [u8; 32],
1009 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1010 /// store it here and only release it to the `ChannelManager` once it asks for it.
1011 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1012 }
1014 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1015 /// Allowed in any state (including after shutdown)
1016 pub fn get_update_time_counter(&self) -> u32 {
1017 self.update_time_counter
1018 }
1020 pub fn get_latest_monitor_update_id(&self) -> u64 {
1021 self.latest_monitor_update_id
1022 }
1024 pub fn should_announce(&self) -> bool {
1025 self.config.announced_channel
1026 }
1028 pub fn is_outbound(&self) -> bool {
1029 self.channel_transaction_parameters.is_outbound_from_holder
1030 }
1032 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1033 /// Allowed in any state (including after shutdown)
1034 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1035 self.config.options.forwarding_fee_base_msat
1036 }
1038 /// Returns true if we've ever received a message from the remote end for this Channel
1039 pub fn have_received_message(&self) -> bool {
1040 self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
1041 }
1043 /// Returns true if this channel is fully established and not known to be closing.
1044 /// Allowed in any state (including after shutdown)
1045 pub fn is_usable(&self) -> bool {
1046 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
1047 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
1048 }
1050 /// shutdown state returns the state of the channel in its various stages of shutdown
1051 pub fn shutdown_state(&self) -> ChannelShutdownState {
1052 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
1053 return ChannelShutdownState::ShutdownComplete;
1054 }
1055 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
1056 return ChannelShutdownState::ShutdownInitiated;
1057 }
1058 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
1059 return ChannelShutdownState::ResolvingHTLCs;
1060 }
1061 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1062 return ChannelShutdownState::NegotiatingClosingFee;
1063 }
1064 return ChannelShutdownState::NotShuttingDown;
1065 }
1067 fn closing_negotiation_ready(&self) -> bool {
1068 self.pending_inbound_htlcs.is_empty() &&
1069 self.pending_outbound_htlcs.is_empty() &&
1070 self.pending_update_fee.is_none() &&
1071 self.channel_state &
1072 (BOTH_SIDES_SHUTDOWN_MASK |
1073 ChannelState::AwaitingRemoteRevoke as u32 |
1074 ChannelState::PeerDisconnected as u32 |
1075 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1076 }
1078 /// Returns true if this channel is currently available for use. This is a superset of
1079 /// is_usable() and considers things like the channel being temporarily disabled.
1080 /// Allowed in any state (including after shutdown)
1081 pub fn is_live(&self) -> bool {
1082 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1083 }
1085 // Public utilities:
1087 pub fn channel_id(&self) -> ChannelId {
1088 self.channel_id
1089 }
1091 // Return the `temporary_channel_id` used during channel establishment.
1093 // Will return `None` for channels created prior to LDK version 0.0.115.
1094 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1095 self.temporary_channel_id
1096 }
1098 pub fn minimum_depth(&self) -> Option<u32> {
1099 self.minimum_depth
1100 }
1102 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1103 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1104 pub fn get_user_id(&self) -> u128 {
1105 self.user_id
1106 }
1108 /// Gets the channel's type
1109 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1110 &self.channel_type
1111 }
1113 /// Gets the channel's `short_channel_id`.
1115 /// Will return `None` if the channel hasn't been confirmed yet.
1116 pub fn get_short_channel_id(&self) -> Option<u64> {
1117 self.short_channel_id
1118 }
1120 /// Allowed in any state (including after shutdown)
1121 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1122 self.latest_inbound_scid_alias
1123 }
1125 /// Allowed in any state (including after shutdown)
1126 pub fn outbound_scid_alias(&self) -> u64 {
1127 self.outbound_scid_alias
1128 }
1130 /// Returns the holder signer for this channel.
1132 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1133 return &self.holder_signer
1134 }
1136 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1137 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1138 /// or prior to any channel actions during `Channel` initialization.
1139 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1140 debug_assert_eq!(self.outbound_scid_alias, 0);
1141 self.outbound_scid_alias = outbound_scid_alias;
1142 }
1144 /// Returns the funding_txo we either got from our peer, or were given by
1145 /// get_funding_created.
1146 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1147 self.channel_transaction_parameters.funding_outpoint
1148 }
1150 /// Returns the height in which our funding transaction was confirmed.
1151 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1152 let conf_height = self.funding_tx_confirmation_height;
1153 if conf_height > 0 {
1154 Some(conf_height)
1155 } else {
1156 None
1157 }
1158 }
1160 /// Returns the block hash in which our funding transaction was confirmed.
1161 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1162 self.funding_tx_confirmed_in
1163 }
1165 /// Returns the current number of confirmations on the funding transaction.
1166 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1167 if self.funding_tx_confirmation_height == 0 {
1168 // We either haven't seen any confirmation yet, or observed a reorg.
1169 return 0;
1170 }
1172 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1173 }
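// Worked example (editorial): with `funding_tx_confirmation_height` = 100 and `height` = 105,
// this returns 105 - 100 + 1 = 6 confirmations; a result of 0 means unconfirmed or reorged out.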
1175 fn get_holder_selected_contest_delay(&self) -> u16 {
1176 self.channel_transaction_parameters.holder_selected_contest_delay
1177 }
1179 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1180 &self.channel_transaction_parameters.holder_pubkeys
1181 }
1183 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1184 self.channel_transaction_parameters.counterparty_parameters
1185 .as_ref().map(|params| params.selected_contest_delay)
1186 }
1188 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1189 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1190 }
1192 /// Allowed in any state (including after shutdown)
1193 pub fn get_counterparty_node_id(&self) -> PublicKey {
1194 self.counterparty_node_id
1195 }
1197 /// Allowed in any state (including after shutdown)
1198 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1199 self.holder_htlc_minimum_msat
1200 }
1202 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1203 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1204 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1205 }
1207 /// Allowed in any state (including after shutdown)
1208 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1209 return cmp::min(
1210 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1211 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1212 // channel might have been used to route very small values (either by honest users or as DoS).
1213 self.channel_value_satoshis * 1000 * 9 / 10,
1215 self.counterparty_max_htlc_value_in_flight_msat
1216 );
1217 }
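// Worked example (editorial): for a 1_000_000 sat channel this announces at most
// min(900_000_000 msat, counterparty_max_htlc_value_in_flight_msat).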
1219 /// Allowed in any state (including after shutdown)
1220 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1221 self.counterparty_htlc_minimum_msat
1222 }
1224 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1225 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1226 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1227 }
1229 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1230 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1231 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1232 cmp::min(
1233 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1234 party_max_htlc_value_in_flight_msat
1235 )
1236 })
1237 }
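// Worked example (editorial): for a 1_000_000 sat channel with 10_000 sat reserves on each
// side, this yields min((1_000_000 - 10_000 - 10_000) * 1000, party_max_htlc_value_in_flight_msat).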
1239 pub fn get_value_satoshis(&self) -> u64 {
1240 self.channel_value_satoshis
1241 }
1243 pub fn get_fee_proportional_millionths(&self) -> u32 {
1244 self.config.options.forwarding_fee_proportional_millionths
1245 }
1247 pub fn get_cltv_expiry_delta(&self) -> u16 {
1248 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1249 }
1251 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1252 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1253 where F::Target: FeeEstimator
1255 match self.config.options.max_dust_htlc_exposure {
1256 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1257 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1258 ConfirmationTarget::OnChainSweep) as u64;
1259 feerate_per_kw.saturating_mul(multiplier)
1261 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
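// For example (hypothetical numbers): with MaxDustHTLCExposure::FeeRateMultiplier(10_000)
// and a fee estimator returning 2_500 sat/kWU for OnChainSweep, the multiplier branch
// above yields a dust exposure limit of 2_500 * 10_000 = 25_000_000 msat.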
1265 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1266 pub fn prev_config(&self) -> Option<ChannelConfig> {
1267 self.prev_config.map(|prev_config| prev_config.0)
1270 // Checks whether we should emit a `ChannelPending` event.
1271 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1272 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1275 // Returns whether we already emitted a `ChannelPending` event.
1276 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1277 self.channel_pending_event_emitted
1280 // Remembers that we already emitted a `ChannelPending` event.
1281 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1282 self.channel_pending_event_emitted = true;
1285 // Checks whether we should emit a `ChannelReady` event.
1286 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1287 self.is_usable() && !self.channel_ready_event_emitted
1290 // Remembers that we already emitted a `ChannelReady` event.
1291 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1292 self.channel_ready_event_emitted = true;
1295 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1296 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1297 /// no longer be considered when forwarding HTLCs.
1298 pub fn maybe_expire_prev_config(&mut self) {
1299 if self.prev_config.is_none() {
1302 let prev_config = self.prev_config.as_mut().unwrap();
1304 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1305 self.prev_config = None;
1309 /// Returns the current [`ChannelConfig`] applied to the channel.
1310 pub fn config(&self) -> ChannelConfig {
1314 /// Updates the channel's config. Returns a bool indicating whether the applied config
1315 /// update resulted in a new ChannelUpdate message.
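///
/// A usage sketch (hypothetical `channel` binding; only the relay-policy fields
/// checked below trigger an update):
/// ```ignore
/// let mut new_config = channel.context.config();
/// new_config.forwarding_fee_base_msat += 100; // relay-policy change
/// assert!(channel.context.update_config(&new_config));
/// ```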
1316 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1317 let did_channel_update =
1318 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1319 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1320 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1321 if did_channel_update {
1322 self.prev_config = Some((self.config.options, 0));
1323 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1324 // policy change to propagate throughout the network.
1325 self.update_time_counter += 1;
1327 self.config.options = *config;
1331 /// Returns true if funding_signed was sent/received and the
1332 /// funding transaction has been broadcast if necessary.
1333 pub fn is_funding_broadcast(&self) -> bool {
1334 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1335 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1338 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1339 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1340 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1341 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1342 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding an HTLC to it).
1344 /// @local is used only to convert relevant internal structures which refer to remote vs local
1345 /// to decide the value of outputs and the direction of HTLCs.
1346 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1347 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1348 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1349 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1350 /// which peer generated this transaction and "to whom" this transaction flows.
1352 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1353 where L::Target: Logger
1355 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1356 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1357 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1359 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1360 let mut remote_htlc_total_msat = 0;
1361 let mut local_htlc_total_msat = 0;
1362 let mut value_to_self_msat_offset = 0;
1364 let mut feerate_per_kw = self.feerate_per_kw;
1365 if let Some((feerate, update_state)) = self.pending_update_fee {
1366 if match update_state {
1367 // Note that these match the inclusion criteria when scanning
1368 // pending_inbound_htlcs below.
1369 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1370 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1371 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1373 feerate_per_kw = feerate;
1377 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1378 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1379 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1381 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1383 macro_rules! get_htlc_in_commitment {
1384 ($htlc: expr, $offered: expr) => {
1385 HTLCOutputInCommitment {
1387 amount_msat: $htlc.amount_msat,
1388 cltv_expiry: $htlc.cltv_expiry,
1389 payment_hash: $htlc.payment_hash,
1390 transaction_output_index: None
1395 macro_rules! add_htlc_output {
1396 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1397 if $outbound == local { // "offered HTLC output"
1398 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1399 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1402 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1404 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1405 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1406 included_non_dust_htlcs.push((htlc_in_tx, $source));
1408 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1409 included_dust_htlcs.push((htlc_in_tx, $source));
1412 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1413 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1416 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1418 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1419 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1420 included_non_dust_htlcs.push((htlc_in_tx, $source));
1422 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1423 included_dust_htlcs.push((htlc_in_tx, $source));
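// A worked dust example for the macro above (hypothetical figures): on a non-anchor
// channel at 2_500 sat/kWU with a 546 sat broadcaster dust limit, an offered HTLC is
// non-dust only if it is worth at least
// 546 + 2_500 * htlc_timeout_tx_weight / 1_000 sat; with the BOLT 3 non-anchor
// timeout weight of 663 WU that is 546 + 1_657 = 2_203 sat.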
1429 for ref htlc in self.pending_inbound_htlcs.iter() {
1430 let (include, state_name) = match htlc.state {
1431 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1432 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1433 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1434 InboundHTLCState::Committed => (true, "Committed"),
1435 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1439 add_htlc_output!(htlc, false, None, state_name);
1440 remote_htlc_total_msat += htlc.amount_msat;
1442 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1444 &InboundHTLCState::LocalRemoved(ref reason) => {
1445 if generated_by_local {
1446 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1447 value_to_self_msat_offset += htlc.amount_msat as i64;
1456 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1458 for ref htlc in self.pending_outbound_htlcs.iter() {
1459 let (include, state_name) = match htlc.state {
1460 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1461 OutboundHTLCState::Committed => (true, "Committed"),
1462 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1463 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1464 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1467 let preimage_opt = match htlc.state {
1468 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1469 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1470 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1474 if let Some(preimage) = preimage_opt {
1475 preimages.push(preimage);
1479 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1480 local_htlc_total_msat += htlc.amount_msat;
1482 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1484 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1485 value_to_self_msat_offset -= htlc.amount_msat as i64;
1487 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1488 if !generated_by_local {
1489 value_to_self_msat_offset -= htlc.amount_msat as i64;
1497 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1498 assert!(value_to_self_msat >= 0);
1499 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1500 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1501 // "violate" their reserve value by couting those against it. Thus, we have to convert
1502 // everything to i64 before subtracting as otherwise we can overflow.
1503 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1504 assert!(value_to_remote_msat >= 0);
1506 #[cfg(debug_assertions)]
1508 // Make sure that the to_self/to_remote is always either past the appropriate
1509 // channel_reserve *or* it is making progress towards it.
1510 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1511 self.holder_max_commitment_tx_output.lock().unwrap()
1513 self.counterparty_max_commitment_tx_output.lock().unwrap()
1515 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1516 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1517 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1518 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1521 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1522 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1523 let (value_to_self, value_to_remote) = if self.is_outbound() {
1524 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1526 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1529 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1530 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1531 let (funding_pubkey_a, funding_pubkey_b) = if local {
1532 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1534 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1537 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1538 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1543 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1544 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1549 let num_nondust_htlcs = included_non_dust_htlcs.len();
1551 let channel_parameters =
1552 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1553 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1554 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1561 &mut included_non_dust_htlcs,
1564 let mut htlcs_included = included_non_dust_htlcs;
1565 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1566 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1567 htlcs_included.append(&mut included_dust_htlcs);
1569 // For the stats, trim the balances to 0 msat if their outputs would be under the broadcaster's dust limit
1570 value_to_self_msat = if value_to_self_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_self_msat };
1571 value_to_remote_msat = if value_to_remote_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_remote_msat };
1579 local_balance_msat: value_to_self_msat as u64,
1580 remote_balance_msat: value_to_remote_msat as u64,
1586 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1587 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1588 /// our counterparty!)
1589 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1590 /// TODO Some magic rust shit to compile-time check this?
1591 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1592 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1593 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1594 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1595 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1597 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1601 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1602 /// will sign and send to our counterparty.
1604 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1605 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1606 //may see payments to it!
1607 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1608 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1609 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1611 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1614 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1615 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1616 /// Panics if called before accept_channel/InboundV1Channel::new
1617 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1618 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1621 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1622 &self.get_counterparty_pubkeys().funding_pubkey
1625 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1629 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1630 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1631 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1632 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1633 // more dust balance if the feerate increases when we have several HTLCs pending
1634 // which are near the dust limit.
1635 let mut feerate_per_kw = self.feerate_per_kw;
1636 // If there's a pending update fee, use it to ensure we aren't under-estimating
1637 // potential feerate updates coming soon.
1638 if let Some((feerate, _)) = self.pending_update_fee {
1639 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1641 if let Some(feerate) = outbound_feerate_update {
1642 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1644 cmp::max(2530, feerate_per_kw * 1250 / 1000)
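// E.g. (hypothetical feerates): a current feerate of 10_000 sat/kWU buffers to
// max(2_530, 10_000 * 1250 / 1000) = 12_500, while a low 1_000 sat/kWU feerate
// buffers to max(2_530, 1_250) = 2_530, the +10 sat/vB floor dominating the +25%.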
1647 /// Get forwarding information for the counterparty.
1648 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1649 self.counterparty_forwarding_info.clone()
1652 /// Returns an HTLCStats for pending inbound HTLCs.
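///
/// On non-anchor channels the dust thresholds used here add the second-stage HTLC
/// transaction fee at the buffered feerate; e.g. (hypothetical numbers) with a
/// buffered feerate of 5_000 sat/kWU and a 546 sat counterparty dust limit, an
/// inbound HTLC counts toward counterparty dust exposure when below
/// 546 + 5_000 * htlc_timeout_tx_weight / 1000 sat.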
1653 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1655 let mut stats = HTLCStats {
1656 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1657 pending_htlcs_value_msat: 0,
1658 on_counterparty_tx_dust_exposure_msat: 0,
1659 on_holder_tx_dust_exposure_msat: 0,
1660 holding_cell_msat: 0,
1661 on_holder_tx_holding_cell_htlcs_count: 0,
1664 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1667 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1668 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1669 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1671 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1672 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1673 for ref htlc in context.pending_inbound_htlcs.iter() {
1674 stats.pending_htlcs_value_msat += htlc.amount_msat;
1675 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1676 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1678 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1679 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1685 /// Returns an HTLCStats for pending outbound HTLCs, *including* pending adds in our holding cell.
1686 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1688 let mut stats = HTLCStats {
1689 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1690 pending_htlcs_value_msat: 0,
1691 on_counterparty_tx_dust_exposure_msat: 0,
1692 on_holder_tx_dust_exposure_msat: 0,
1693 holding_cell_msat: 0,
1694 on_holder_tx_holding_cell_htlcs_count: 0,
1697 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1700 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1701 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1702 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1704 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1705 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1706 for ref htlc in context.pending_outbound_htlcs.iter() {
1707 stats.pending_htlcs_value_msat += htlc.amount_msat;
1708 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1709 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1711 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1712 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1716 for update in context.holding_cell_htlc_updates.iter() {
1717 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1718 stats.pending_htlcs += 1;
1719 stats.pending_htlcs_value_msat += amount_msat;
1720 stats.holding_cell_msat += amount_msat;
1721 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1722 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1724 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1725 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1727 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1734 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1735 /// Doesn't bother handling the
1736 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1737 /// corner case properly.
1738 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1739 -> AvailableBalances
1740 where F::Target: FeeEstimator
1742 let context = &self;
1743 // Note that we have to handle overflow due to the above case.
1744 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1745 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1747 let mut balance_msat = context.value_to_self_msat;
1748 for ref htlc in context.pending_inbound_htlcs.iter() {
1749 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1750 balance_msat += htlc.amount_msat;
1753 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1755 let outbound_capacity_msat = context.value_to_self_msat
1756 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1758 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1760 let mut available_capacity_msat = outbound_capacity_msat;
1762 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1763 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1767 if context.is_outbound() {
1768 // We should mind channel commit tx fee when computing how much of the available capacity
1769 // can be used in the next htlc. Mirrors the logic in send_htlc.
1771 // The fee depends on whether the amount we will be sending is above dust or not,
1772 // and the answer will in turn change the amount itself, making it a circular dependency.
1774 // This complicates the computation around dust-values, up to the one-htlc-value.
1775 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1776 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1777 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1780 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1781 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1782 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1783 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1784 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1785 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1786 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1789 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1790 // value ends up being below dust, we have this fee available again. In that case,
1791 // match the value to right-below-dust.
1792 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1793 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1794 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1795 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1796 debug_assert!(one_htlc_difference_msat != 0);
1797 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1798 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1799 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1801 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1804 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1805 // sending a new HTLC won't reduce their balance below our reserve threshold.
1806 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1807 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1808 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1811 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1812 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1814 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1815 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1816 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1818 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1819 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1820 // we've selected for them, we can only send dust HTLCs.
1821 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1825 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1827 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1828 // between zero and the remaining dust exposure limit, OR above the dust limit.
1829 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1830 // send above the dust limit (as the router can always overpay to meet the dust limit).
1831 let mut remaining_msat_below_dust_exposure_limit = None;
1832 let mut dust_exposure_dust_limit_msat = 0;
1833 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1835 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1836 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1838 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1839 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1840 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1842 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1843 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1844 remaining_msat_below_dust_exposure_limit =
1845 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1846 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1849 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1850 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1851 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1852 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1853 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1854 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1857 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1858 if available_capacity_msat < dust_exposure_dust_limit_msat {
1859 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1861 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1865 available_capacity_msat = cmp::min(available_capacity_msat,
1866 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1868 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1869 available_capacity_msat = 0;
1873 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1874 - context.value_to_self_msat as i64
1875 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1876 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1878 outbound_capacity_msat,
1879 next_outbound_htlc_limit_msat: available_capacity_msat,
1880 next_outbound_htlc_minimum_msat,
1885 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1886 let context = &self;
1887 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1890 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1891 /// number of pending HTLCs that are on track to be in our next commitment tx.
1893 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1894 /// `fee_spike_buffer_htlc` is `Some`.
1896 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1897 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1899 /// Dust HTLCs are excluded.
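///
/// A worked sketch (hypothetical, non-anchor figures): at 2_500 sat/kWU with two
/// weighed-in HTLCs, the fee comes out to
/// (724 + 2 * 172) * 2_500 / 1000 * 1000 = 2_670_000 msat, 724 WU being the BOLT 3
/// non-anchor commitment base weight and 172 WU the per-HTLC output weight.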
1900 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1901 let context = &self;
1902 assert!(context.is_outbound());
1904 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1907 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1908 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1910 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1911 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1913 let mut addl_htlcs = 0;
1914 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1916 HTLCInitiator::LocalOffered => {
1917 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1921 HTLCInitiator::RemoteOffered => {
1922 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1928 let mut included_htlcs = 0;
1929 for ref htlc in context.pending_inbound_htlcs.iter() {
1930 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1933 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1934 // transaction including this HTLC if it times out before they RAA.
1935 included_htlcs += 1;
1938 for ref htlc in context.pending_outbound_htlcs.iter() {
1939 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1943 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1944 OutboundHTLCState::Committed => included_htlcs += 1,
1945 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1946 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1947 // transaction won't be generated until they send us their next RAA, which will mean
1948 // dropping any HTLCs in this state.
1953 for htlc in context.holding_cell_htlc_updates.iter() {
1955 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1956 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1961 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1962 // ack we're guaranteed to never include them in commitment txs anymore.
1966 let num_htlcs = included_htlcs + addl_htlcs;
1967 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
1968 #[cfg(any(test, fuzzing))]
1971 if fee_spike_buffer_htlc.is_some() {
1972 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1974 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1975 + context.holding_cell_htlc_updates.len();
1976 let commitment_tx_info = CommitmentTxInfoCached {
1978 total_pending_htlcs,
1979 next_holder_htlc_id: match htlc.origin {
1980 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1981 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1983 next_counterparty_htlc_id: match htlc.origin {
1984 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1985 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1987 feerate: context.feerate_per_kw,
1989 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1994 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1995 /// pending HTLCs that are on track to be in their next commitment tx.
1997 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1998 /// `fee_spike_buffer_htlc` is `Some`.
2000 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2001 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2003 /// Dust HTLCs are excluded.
2004 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2005 let context = &self;
2006 assert!(!context.is_outbound());
2008 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2011 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2012 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2014 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2015 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2017 let mut addl_htlcs = 0;
2018 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2020 HTLCInitiator::LocalOffered => {
2021 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2025 HTLCInitiator::RemoteOffered => {
2026 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2032 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2033 // non-dust inbound HTLCs are included (as all states imply inclusion), while only a subset of
2034 // outbound HTLC states are counted, see below.
2035 let mut included_htlcs = 0;
2036 for ref htlc in context.pending_inbound_htlcs.iter() {
2037 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2040 included_htlcs += 1;
2043 for ref htlc in context.pending_outbound_htlcs.iter() {
2044 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2047 // We include outbound HTLCs which will be present in their next commitment_signed, i.e. any
2048 // HTLC we have announced whose removal we have not yet acknowledged via commitment_signed.
2050 OutboundHTLCState::Committed => included_htlcs += 1,
2051 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2052 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2057 let num_htlcs = included_htlcs + addl_htlcs;
2058 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2059 #[cfg(any(test, fuzzing))]
2062 if fee_spike_buffer_htlc.is_some() {
2063 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2065 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2066 let commitment_tx_info = CommitmentTxInfoCached {
2068 total_pending_htlcs,
2069 next_holder_htlc_id: match htlc.origin {
2070 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2071 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2073 next_counterparty_htlc_id: match htlc.origin {
2074 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2075 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2077 feerate: context.feerate_per_kw,
2079 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2084 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2085 where F: Fn() -> Option<O> {
2086 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2087 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2094 /// Returns the transaction if there is a pending funding transaction that is yet to be
2096 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2097 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2100 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2102 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2103 self.if_unbroadcasted_funding(||
2104 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2108 /// Returns whether the channel is funded in a batch.
2109 pub fn is_batch_funding(&self) -> bool {
2110 self.is_batch_funding.is_some()
2113 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2115 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2116 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2119 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2120 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2121 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2122 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2123 /// immediately (others we will have to allow to time out).
2124 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2125 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2126 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2127 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2128 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2129 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2131 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2132 // return them to fail the payment.
2133 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2134 let counterparty_node_id = self.get_counterparty_node_id();
2135 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2137 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2138 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2143 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2144 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2145 // returning a channel monitor update here would imply a channel monitor update before
2146 // we even registered the channel monitor to begin with, which is invalid.
2147 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2148 // funding transaction, don't return a funding txo (which prevents providing the
2149 // monitor update to the user, even if we return one).
2150 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2151 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2152 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2153 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2154 update_id: self.latest_monitor_update_id,
2155 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2159 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2161 self.channel_state = ChannelState::ShutdownComplete as u32;
2162 self.update_time_counter += 1;
2165 dropped_outbound_htlcs,
2166 unbroadcasted_batch_funding_txid,
2170 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2171 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2172 let counterparty_keys = self.build_remote_transaction_keys();
2173 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2174 let signature = match &self.holder_signer {
2175 // TODO (taproot|arik): move match into calling method for Taproot
2176 ChannelSignerType::Ecdsa(ecdsa) => {
2177 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2178 .map(|(sig, _)| sig).ok()?
2180 // TODO (taproot|arik)
2185 if self.signer_pending_funding {
2186 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2187 self.signer_pending_funding = false;
2190 Some(msgs::FundingCreated {
2191 temporary_channel_id: self.temporary_channel_id.unwrap(),
2192 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2193 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2196 partial_signature_with_nonce: None,
2198 next_local_nonce: None,
2202 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2203 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2204 let counterparty_keys = self.build_remote_transaction_keys();
2205 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2207 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2208 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2209 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2210 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2212 match &self.holder_signer {
2213 // TODO (arik): move match into calling method for Taproot
2214 ChannelSignerType::Ecdsa(ecdsa) => {
2215 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2216 .map(|(signature, _)| msgs::FundingSigned {
2217 channel_id: self.channel_id(),
2220 partial_signature_with_nonce: None,
2224 if funding_signed.is_none() {
2225 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2226 self.signer_pending_funding = true;
2227 } else if self.signer_pending_funding {
2228 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2229 self.signer_pending_funding = false;
2232 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2233 (counterparty_initial_commitment_tx, funding_signed)
2235 // TODO (taproot|arik)
2242 // Internal utility functions for channels
2244 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2245 /// `channel_value_satoshis` in msat, set through
2246 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2248 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2250 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2251 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2252 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2254 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2257 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2259 channel_value_satoshis * 10 * configured_percent
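// E.g. (hypothetical values): a 1_000_000 sat channel configured at 10% yields
// 1_000_000 * 10 * 10 = 100_000_000 msat (100_000 sat) of allowed in-flight value;
// the `* 10` folds together the msat conversion (* 1000) and the percentage (/ 100).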
2262 /// Returns a minimum channel reserve value the remote needs to maintain,
2263 /// required by us according to the configured or default
2264 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2266 /// Guaranteed to return a value no larger than channel_value_satoshis
2268 /// This is used both for outbound and inbound channels and has lower bound
2269 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2270 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2271 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2272 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
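// E.g. (hypothetical config): with their_channel_reserve_proportional_millionths of
// 10_000 (1%), a 1_000_000 sat channel requires a 10_000 sat remote reserve, while a
// 50_000 sat channel computes 500 sat and is floored to MIN_THEIR_CHAN_RESERVE_SATOSHIS.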
2275 /// This is for legacy reasons, present for forward-compatibility.
2276 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2277 /// from storage. Hence, we use this function to not persist default values of
2278 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2279 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2280 let (q, _) = channel_value_satoshis.overflowing_div(100);
2281 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2284 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2285 // Note that num_htlcs should not include dust HTLCs.
2287 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2288 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2291 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2292 // Note that num_htlcs should not include dust HTLCs.
2293 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2294 // Note that we need to divide before multiplying to round properly,
2295 // since the lowest denomination of bitcoin on-chain is the satoshi.
2296 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
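// E.g. (hypothetical feerate, assuming the non-anchor base weight of 724 WU): at
// 253 sat/kWU with one non-dust HTLC, (724 + 172) * 253 = 226_688 floors to 226 sat
// before re-scaling to 226_000 msat; multiplying by 1000 first would instead imply a
// non-integer 226.688 sat on-chain fee.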
2299 // Holder designates channel data owned for the benefit of the user client.
2300 // Counterparty designates channel data owned by the other channel participant.
2301 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2302 pub context: ChannelContext<SP>,
2305 #[cfg(any(test, fuzzing))]
2306 struct CommitmentTxInfoCached {
2308 total_pending_htlcs: usize,
2309 next_holder_htlc_id: u64,
2310 next_counterparty_htlc_id: u64,
2314 impl<SP: Deref> Channel<SP> where
2315 SP::Target: SignerProvider,
2316 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2318 fn check_remote_fee<F: Deref, L: Deref>(
2319 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2320 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2321 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2323 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2324 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2326 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2328 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2329 if feerate_per_kw < lower_limit {
2330 if let Some(cur_feerate) = cur_feerate_per_kw {
2331 if feerate_per_kw > cur_feerate {
2333 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2334 cur_feerate, feerate_per_kw);
2338 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2344 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2345 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2346 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2347 // outside of those situations will fail.
2348 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2352 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2357 1 + // script length (0)
2361 )*4 + // * 4 for non-witness parts
2362 2 + // witness marker and flag
2363 1 + // witness element count
2364 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2365 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2366 2*(1 + 71); // two signatures + sighash type flags
2367 if let Some(spk) = a_scriptpubkey {
2368 ret += ((8+1) + // output values and script length
2369 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2371 if let Some(spk) = b_scriptpubkey {
2372 ret += ((8+1) + // output values and script length
2373 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
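// E.g. (hypothetical script sizes): with two P2WPKH shutdown scripts of 22 bytes
// each, every output adds (8 + 1 + 22) * 4 = 124 WU on top of the base computed above.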
2379 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2380 assert!(self.context.pending_inbound_htlcs.is_empty());
2381 assert!(self.context.pending_outbound_htlcs.is_empty());
2382 assert!(self.context.pending_update_fee.is_none());
2384 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2385 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2386 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2388 if value_to_holder < 0 {
2389 assert!(self.context.is_outbound());
2390 total_fee_satoshis += (-value_to_holder) as u64;
2391 } else if value_to_counterparty < 0 {
2392 assert!(!self.context.is_outbound());
2393 total_fee_satoshis += (-value_to_counterparty) as u64;
2396 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2397 value_to_counterparty = 0;
2400 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2401 value_to_holder = 0;
2404 assert!(self.context.shutdown_scriptpubkey.is_some());
2405 let holder_shutdown_script = self.get_closing_scriptpubkey();
2406 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2407 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2409 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2410 (closing_transaction, total_fee_satoshis)
2413 fn funding_outpoint(&self) -> OutPoint {
2414 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2417 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] entirely.
2420 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2421 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2423 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
2425 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2426 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2427 where L::Target: Logger {
2428 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2429 // (see equivalent if condition there).
2430 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2431 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2432 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2433 self.context.latest_monitor_update_id = mon_update_id;
2434 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2435 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2439 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2440 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2441 // caller thought we could have something claimed (because we wouldn't have accepted an
2442 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2444 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2445 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2447 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2449 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2450 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2451 // these, but for now we just have to treat them as normal.
2453 let mut pending_idx = core::usize::MAX;
2454 let mut htlc_value_msat = 0;
2455 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2456 if htlc.htlc_id == htlc_id_arg {
2457 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2458 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2459 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2461 InboundHTLCState::Committed => {},
2462 InboundHTLCState::LocalRemoved(ref reason) => {
2463 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2465 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2466 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2468 return UpdateFulfillFetch::DuplicateClaim {};
2471 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2472 // Don't return in release mode here so that we can update channel_monitor
2475 pending_idx = idx;
2476 htlc_value_msat = htlc.amount_msat;
2477 break;
2480 if pending_idx == core::usize::MAX {
2481 #[cfg(any(test, fuzzing))]
2482 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2483 // this is simply a duplicate claim, not previously failed and we lost funds.
2484 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2485 return UpdateFulfillFetch::DuplicateClaim {};
2488 // Now update local state:
2490 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2491 // can claim it even if the channel hits the chain before we see their next commitment.
2492 self.context.latest_monitor_update_id += 1;
2493 let monitor_update = ChannelMonitorUpdate {
2494 update_id: self.context.latest_monitor_update_id,
2495 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2496 payment_preimage: payment_preimage_arg.clone(),
2500 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2501 // Note that this condition is the same as the assertion in
2502 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2503 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2504 // do not get into this branch.
2505 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2506 match pending_update {
2507 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2508 if htlc_id_arg == htlc_id {
2509 // Make sure we don't leave latest_monitor_update_id incremented here:
2510 self.context.latest_monitor_update_id -= 1;
2511 #[cfg(any(test, fuzzing))]
2512 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2513 return UpdateFulfillFetch::DuplicateClaim {};
2516 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2517 if htlc_id_arg == htlc_id {
2518 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2519 // TODO: We may actually be able to switch to a fulfill here, though it's
2520 // rare enough it may not be worth the complexity burden.
2521 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2522 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2528 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2529 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2530 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2532 #[cfg(any(test, fuzzing))]
2533 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2534 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2536 #[cfg(any(test, fuzzing))]
2537 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2540 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2541 if let InboundHTLCState::Committed = htlc.state {
2543 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2544 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2546 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2547 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2550 UpdateFulfillFetch::NewClaim {
2551 monitor_update,
2552 htlc_value_msat,
2553 msg: Some(msgs::UpdateFulfillHTLC {
2554 channel_id: self.context.channel_id(),
2555 htlc_id: htlc_id_arg,
2556 payment_preimage: payment_preimage_arg,
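// A self-contained sketch (test-only; the bit values are made up, the real ChannelState flags
// live elsewhere in this file) of the gating above: if any of AwaitingRemoteRevoke,
// PeerDisconnected or MonitorUpdateInProgress is set, the claim is queued in the holding cell
// instead of producing an update_fulfill_htlc message immediately.
#[cfg(test)]
fn example_claim_gating(channel_state: u32) -> bool {
const AWAITING_REMOTE_REVOKE: u32 = 1 << 0; // illustrative bit positions only
const PEER_DISCONNECTED: u32 = 1 << 1;
const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 2;
// true means "defer to the holding cell", mirroring the branch above.
channel_state & (AWAITING_REMOTE_REVOKE | PEER_DISCONNECTED | MONITOR_UPDATE_IN_PROGRESS) != 0
}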
2561 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2562 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2563 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2564 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2565 // Even if we aren't supposed to let new monitor updates with commitment state
2566 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2567 // matter what. Sadly, to push a new monitor update which flies before others
2568 // already queued, we have to insert it into the pending queue and update the
2569 // update_ids of all the following monitors.
2570 if release_cs_monitor && msg.is_some() {
2571 let mut additional_update = self.build_commitment_no_status_check(logger);
2572 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2573 // to be strictly increasing by one, so decrement it here.
2574 self.context.latest_monitor_update_id = monitor_update.update_id;
2575 monitor_update.updates.append(&mut additional_update.updates);
2576 } else {
2577 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2578 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2579 monitor_update.update_id = new_mon_id;
2580 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2581 held_update.update.update_id += 1;
2582 }
2583 if msg.is_some() {
2584 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2585 let update = self.build_commitment_no_status_check(logger);
2586 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2587 update,
2588 });
2592 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2593 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2595 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
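// Sketch (test-only, plain u64s standing in for ChannelMonitorUpdates) of the renumbering
// above: a preimage update that must "fly before" already-blocked updates takes the first
// blocked update's id, and every blocked update is shifted up by one so that ids stay
// strictly increasing.
#[cfg(test)]
fn example_renumber_blocked_updates(new_update_id: &mut u64, blocked_update_ids: &mut [u64]) {
if let Some(first_id) = blocked_update_ids.first().copied() {
*new_update_id = first_id;
for id in blocked_update_ids.iter_mut() { *id += 1; }
}
}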
2599 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2600 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2601 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2602 /// before we fail backwards.
2604 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2605 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2606 /// [`ChannelError::Ignore`].
2607 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2608 -> Result<(), ChannelError> where L::Target: Logger {
2609 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2610 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2613 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2614 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2615 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2616 /// before we fail backwards.
2618 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2619 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2620 /// [`ChannelError::Ignore`].
2621 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2622 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2623 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2624 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2626 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2628 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2629 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2630 // these, but for now we just have to treat them as normal.
2632 let mut pending_idx = core::usize::MAX;
2633 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2634 if htlc.htlc_id == htlc_id_arg {
2636 InboundHTLCState::Committed => {},
2637 InboundHTLCState::LocalRemoved(ref reason) => {
2638 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2639 } else {
2640 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2645 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2646 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2649 pending_idx = idx;
2652 if pending_idx == core::usize::MAX {
2653 #[cfg(any(test, fuzzing))]
2654 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2655 // is simply a duplicate fail, not previously failed and we failed-back too early.
2656 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2657 return Ok(None);
2660 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2661 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2662 force_holding_cell = true;
2665 // Now update local state:
2666 if force_holding_cell {
2667 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2668 match pending_update {
2669 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2670 if htlc_id_arg == htlc_id {
2671 #[cfg(any(test, fuzzing))]
2672 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2673 return Ok(None);
2676 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2677 if htlc_id_arg == htlc_id {
2678 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2679 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2685 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2686 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2687 htlc_id: htlc_id_arg,
2688 err_packet,
2689 });
2690 return Ok(None);
2693 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2695 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2696 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2699 Ok(Some(msgs::UpdateFailHTLC {
2700 channel_id: self.context.channel_id(),
2701 htlc_id: htlc_id_arg,
2702 reason: err_packet
2703 }))
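// Decision sketch (test-only) of the one-resolution-per-HTLC rule documented above: after a
// reconnect a repeated fulfill is a tolerable duplicate, but an HTLC must never be failed
// twice, nor failed after it was fulfilled.
#[cfg(test)]
fn example_second_resolution(prior_was_fulfill: Option<bool>, fulfilling_now: bool) -> &'static str {
match (prior_was_fulfill, fulfilling_now) {
(None, _) => "first resolution: proceed",
(Some(true), true) => "duplicate claim: treat as a no-op",
(Some(true), false) => "fail-after-fulfill: reject, upstream must wait for an irrevocable commitment",
(Some(false), _) => "already failed: debug_assert and ignore",
}
}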
2706 // Message handlers:
2708 /// Handles a funding_signed message from the remote end.
2709 /// If this call is successful, broadcast the funding transaction (and not before!)
2710 pub fn funding_signed<L: Deref>(
2711 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2712 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2716 if !self.context.is_outbound() {
2717 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2719 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2720 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2722 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2723 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2724 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2725 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2728 let funding_script = self.context.get_funding_redeemscript();
2730 let counterparty_keys = self.context.build_remote_transaction_keys();
2731 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2732 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2733 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2735 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2736 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2738 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2739 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2741 let trusted_tx = initial_commitment_tx.trust();
2742 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2743 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2744 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2745 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2746 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2750 let holder_commitment_tx = HolderCommitmentTransaction::new(
2751 initial_commitment_tx,
2752 msg.signature,
2753 Vec::new(),
2754 &self.context.get_holder_pubkeys().funding_pubkey,
2755 self.context.counterparty_funding_pubkey()
2758 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2759 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2762 let funding_redeemscript = self.context.get_funding_redeemscript();
2763 let funding_txo = self.context.get_funding_txo().unwrap();
2764 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2765 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2766 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2767 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2768 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2769 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2770 shutdown_script, self.context.get_holder_selected_contest_delay(),
2771 &self.context.destination_script, (funding_txo, funding_txo_script),
2772 &self.context.channel_transaction_parameters,
2773 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2774 obscure_factor,
2775 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2776 let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
2777 channel_monitor.provide_initial_counterparty_commitment_tx(
2778 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2779 self.context.cur_counterparty_commitment_transaction_number,
2780 self.context.counterparty_cur_commitment_point.unwrap(),
2781 counterparty_initial_commitment_tx.feerate_per_kw(),
2782 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2783 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
2785 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitors yet, so no update can have failed!
2786 if self.context.is_batch_funding() {
2787 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2788 } else {
2789 self.context.channel_state = ChannelState::FundingSent as u32;
2791 self.context.cur_holder_commitment_transaction_number -= 1;
2792 self.context.cur_counterparty_commitment_transaction_number -= 1;
2794 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2796 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2797 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2799 Ok(channel_monitor)
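// Worked sketch (test-only) of the BOLT 3 commitment number obscuring that
// get_commitment_transaction_number_obscure_factor feeds into: the low 48 bits of the
// commitment number are XORed with a factor derived from both sides' payment basepoints
// before being spread across the commitment tx's locktime and sequence fields.
#[cfg(test)]
fn example_obscured_commitment_number(commitment_number: u64, obscure_factor: u64) -> u64 {
(commitment_number ^ obscure_factor) & 0xffff_ffff_ffff
}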
2801 /// Updates the state of the channel to indicate that all channels in the batch have received
2802 /// funding_signed and persisted their monitors.
2803 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2804 /// treated as a non-batch channel going forward.
2805 pub fn set_batch_ready(&mut self) {
2806 self.context.is_batch_funding = None;
2807 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2810 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2811 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2812 /// send back.
2813 pub fn channel_ready<NS: Deref, L: Deref>(
2814 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2815 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2816 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2818 NS::Target: NodeSigner,
2821 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2822 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2823 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2826 if let Some(scid_alias) = msg.short_channel_id_alias {
2827 if Some(scid_alias) != self.context.short_channel_id {
2828 // The scid alias provided can be used to route payments *from* our counterparty,
2829 // i.e. can be used for inbound payments and provided in invoices, but is not used
2830 // when routing outbound payments.
2831 self.context.latest_inbound_scid_alias = Some(scid_alias);
2835 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2837 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2838 // batch, but we can receive channel_ready messages.
2839 debug_assert!(
2840 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2841 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
2842 );
2843 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2844 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2845 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2846 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2847 self.context.update_time_counter += 1;
2848 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2849 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2850 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2851 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2853 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2854 // required, or they're sending a fresh SCID alias.
2855 let expected_point =
2856 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2857 // If they haven't ever sent an updated point, the point they send should match
2858 // the current one.
2859 self.context.counterparty_cur_commitment_point
2860 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2861 // If we've advanced the commitment number once, the second commitment point is
2862 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2863 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2864 self.context.counterparty_prev_commitment_point
2866 // If they have sent updated points, channel_ready is always supposed to match
2867 // their "first" point, which we re-derive here.
2868 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2869 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2870 ).expect("We already advanced, so previous secret keys should have been validated already")))
2872 if expected_point != Some(msg.next_per_commitment_point) {
2873 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2877 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2880 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2881 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2883 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2885 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
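// Sketch (test-only) of the expected_point re-derivation above: once the counterparty has
// revoked a state, the matching per-commitment point can be recomputed from the revealed
// secret rather than being stored.
#[cfg(test)]
fn example_rederive_commitment_point(secp_ctx: &Secp256k1<secp256k1::All>, revealed_secret: &[u8; 32]) -> Option<PublicKey> {
SecretKey::from_slice(&revealed_secret[..]).ok()
.map(|secret| PublicKey::from_secret_key(secp_ctx, &secret))
}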
2888 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2889 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2890 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2891 ) -> Result<(), ChannelError>
2892 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2893 FE::Target: FeeEstimator, L::Target: Logger,
2895 // We can't accept HTLCs sent after we've sent a shutdown.
2896 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2897 if local_sent_shutdown {
2898 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2900 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2901 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2902 if remote_sent_shutdown {
2903 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2905 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2906 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2908 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2909 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2911 if msg.amount_msat == 0 {
2912 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2914 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2915 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2918 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2919 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2920 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2921 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2923 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2924 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2927 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2928 // the reserve_satoshis we told them to always have as direct payment so that they lose
2929 // something if we punish them for broadcasting an old state).
2930 // Note that we don't really care about having a small/no to_remote output in our local
2931 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2932 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2933 // present in the next commitment transaction we send them (at least for fulfilled ones,
2934 // failed ones won't modify value_to_self).
2935 // Note that we will send HTLCs which another instance of rust-lightning would think
2936 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2937 // Channel state once they will not be present in the next received commitment
2938 // transaction).
2939 let mut removed_outbound_total_msat = 0;
2940 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2941 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2942 removed_outbound_total_msat += htlc.amount_msat;
2943 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2944 removed_outbound_total_msat += htlc.amount_msat;
2948 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2949 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2950 (0, 0)
2951 } else {
2952 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2953 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2954 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2955 };
2956 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2957 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2958 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2959 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2960 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2961 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2962 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2966 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2967 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2968 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2969 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2970 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2971 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2972 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2976 let pending_value_to_self_msat =
2977 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2978 let pending_remote_value_msat =
2979 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2980 if pending_remote_value_msat < msg.amount_msat {
2981 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2984 // Check that the remote can afford to pay for this HTLC on-chain at the current
2985 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2987 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2988 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2989 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
2991 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2992 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2993 } else {
2994 0
2995 };
2996 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
2997 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
2999 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3000 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3004 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3005 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3006 } else {
3007 0
3008 };
3009 if !self.context.is_outbound() {
3010 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3011 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3012 // side, only on the sender's. Note that with anchor outputs we are no longer as
3013 // sensitive to fee spikes, so we don't need to account for them here.
3014 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3015 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3016 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3017 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3019 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3020 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3021 // the HTLC, i.e. its status is already set to failing.
3022 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3023 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3026 // Check that they won't violate our local required channel reserve by adding this HTLC.
3027 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3028 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3029 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3030 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3033 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3034 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3036 if msg.cltv_expiry >= 500000000 {
3037 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3040 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3041 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3042 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3046 // Now update local state:
3047 self.context.next_counterparty_htlc_id += 1;
3048 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3049 htlc_id: msg.htlc_id,
3050 amount_msat: msg.amount_msat,
3051 payment_hash: msg.payment_hash,
3052 cltv_expiry: msg.cltv_expiry,
3053 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3054 });
3056 Ok(())
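// Arithmetic sketch (test-only) of the non-anchor dust threshold used above: an offered HTLC
// is treated as dust on the counterparty's commitment tx when its value cannot pay for an
// HTLC-timeout tx at the buffered feerate and still clear their dust limit.
#[cfg(test)]
fn example_is_timeout_dust(amount_msat: u64, dust_buffer_feerate_per_kw: u64, timeout_tx_weight: u64, counterparty_dust_limit_sats: u64) -> bool {
let threshold_sats = dust_buffer_feerate_per_kw * timeout_tx_weight / 1000 + counterparty_dust_limit_sats;
amount_msat / 1000 < threshold_sats
}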
3058 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3060 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3061 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3062 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3063 if htlc.htlc_id == htlc_id {
3064 let outcome = match check_preimage {
3065 None => fail_reason.into(),
3066 Some(payment_preimage) => {
3067 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3068 if payment_hash != htlc.payment_hash {
3069 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3071 OutboundHTLCOutcome::Success(Some(payment_preimage))
3075 OutboundHTLCState::LocalAnnounced(_) =>
3076 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3077 OutboundHTLCState::Committed => {
3078 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3080 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3081 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3083 return Ok(htlc);
3086 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
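// Sketch (test-only) of the preimage check in mark_outbound_htlc_removed above: a fulfill is
// only valid if SHA256(preimage) equals the payment hash we committed to when the HTLC was
// added.
#[cfg(test)]
fn example_preimage_matches(preimage: &[u8; 32], payment_hash: &[u8; 32]) -> bool {
Sha256::hash(&preimage[..]).to_byte_array() == *payment_hash
}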
3089 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3090 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3091 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3093 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3094 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3097 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3100 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3101 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3102 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3104 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3105 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3108 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3109 Ok(())
3112 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3113 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3114 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3116 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3117 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3120 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3121 Ok(())
3124 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3125 where L::Target: Logger
3127 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3128 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3130 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3131 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3133 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3134 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3137 let funding_script = self.context.get_funding_redeemscript();
3139 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3141 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3142 let commitment_txid = {
3143 let trusted_tx = commitment_stats.tx.trust();
3144 let bitcoin_tx = trusted_tx.built_transaction();
3145 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3147 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3148 log_bytes!(msg.signature.serialize_compact()[..]),
3149 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3150 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3151 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3152 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3156 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3158 // If our counterparty updated the channel fee in this commitment transaction, check that
3159 // they can actually afford the new fee now.
3160 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3161 update_state == FeeUpdateState::RemoteAnnounced
3162 } else { false };
3163 if update_fee {
3164 debug_assert!(!self.context.is_outbound());
3165 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3166 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3167 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3170 #[cfg(any(test, fuzzing))]
3172 if self.context.is_outbound() {
3173 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3174 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3175 if let Some(info) = projected_commit_tx_info {
3176 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3177 + self.context.holding_cell_htlc_updates.len();
3178 if info.total_pending_htlcs == total_pending_htlcs
3179 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3180 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3181 && info.feerate == self.context.feerate_per_kw {
3182 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3188 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3189 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3192 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3193 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3194 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3195 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3196 // backwards compatibility, we never use it in production. To provide test coverage, here,
3197 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3198 #[allow(unused_assignments, unused_mut)]
3199 let mut separate_nondust_htlc_sources = false;
3200 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3201 use core::hash::{BuildHasher, Hasher};
3202 // Get a random value using the only std API to do so - the DefaultHasher
3203 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3204 separate_nondust_htlc_sources = rand_val % 2 == 0;
3207 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3208 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3209 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3210 if let Some(_) = htlc.transaction_output_index {
3211 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3212 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3213 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3215 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3216 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3217 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3218 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3219 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3220 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3221 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3222 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3224 if !separate_nondust_htlc_sources {
3225 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3226 }
3227 } else {
3228 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3230 if separate_nondust_htlc_sources {
3231 if let Some(source) = source_opt.take() {
3232 nondust_htlc_sources.push(source);
3235 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3238 let holder_commitment_tx = HolderCommitmentTransaction::new(
3239 commitment_stats.tx,
3240 msg.signature,
3241 msg.htlc_signatures.clone(),
3242 &self.context.get_holder_pubkeys().funding_pubkey,
3243 self.context.counterparty_funding_pubkey()
3246 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3247 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3249 // Update state now that we've passed all the can-fail calls...
3250 let mut need_commitment = false;
3251 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3252 if *update_state == FeeUpdateState::RemoteAnnounced {
3253 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3254 need_commitment = true;
3258 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3259 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3260 Some(forward_info.clone())
3261 } else { None };
3262 if let Some(forward_info) = new_forward {
3263 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3264 &htlc.payment_hash, &self.context.channel_id);
3265 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3266 need_commitment = true;
3269 let mut claimed_htlcs = Vec::new();
3270 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3271 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3272 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3273 &htlc.payment_hash, &self.context.channel_id);
3274 // Grab the preimage, if it exists, instead of cloning
3275 let mut reason = OutboundHTLCOutcome::Success(None);
3276 mem::swap(outcome, &mut reason);
3277 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3278 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3279 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3280 // have a `Success(None)` reason. In this case we could forget some HTLC
3281 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3282 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3283 // upgrade process.
3284 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3286 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3287 need_commitment = true;
3291 self.context.latest_monitor_update_id += 1;
3292 let mut monitor_update = ChannelMonitorUpdate {
3293 update_id: self.context.latest_monitor_update_id,
3294 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3295 commitment_tx: holder_commitment_tx,
3296 htlc_outputs: htlcs_and_sigs,
3297 claimed_htlcs,
3298 nondust_htlc_sources,
3302 self.context.cur_holder_commitment_transaction_number -= 1;
3303 self.context.expecting_peer_commitment_signed = false;
3304 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3305 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3306 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3308 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3309 // In case we initially failed monitor updating without requiring a response, we need
3310 // to make sure the RAA gets sent first.
3311 self.context.monitor_pending_revoke_and_ack = true;
3312 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3313 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3314 // the corresponding HTLC status updates so that
3315 // get_last_commitment_update_for_send includes the right HTLCs.
3316 self.context.monitor_pending_commitment_signed = true;
3317 let mut additional_update = self.build_commitment_no_status_check(logger);
3318 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3319 // strictly increasing by one, so decrement it here.
3320 self.context.latest_monitor_update_id = monitor_update.update_id;
3321 monitor_update.updates.append(&mut additional_update.updates);
3323 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3324 &self.context.channel_id);
3325 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3328 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3329 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3330 // we'll send one right away when we get the revoke_and_ack when we
3331 // free_holding_cell_htlcs().
3332 let mut additional_update = self.build_commitment_no_status_check(logger);
3333 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3334 // strictly increasing by one, so decrement it here.
3335 self.context.latest_monitor_update_id = monitor_update.update_id;
3336 monitor_update.updates.append(&mut additional_update.updates);
3337 true
3338 } else { false };
3340 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3341 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3342 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3343 return Ok(self.push_ret_blockable_mon_update(monitor_update));
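// Sketch (test-only, Vec<u32> standing in for ChannelMonitorUpdateStep lists) of the merge
// pattern above: the extra commitment update is folded into the already-numbered base update
// and the id counter is rolled back so update_ids keep increasing strictly by one.
#[cfg(test)]
fn example_merge_monitor_updates(latest_monitor_update_id: &mut u64, base_update_id: u64, base_updates: &mut Vec<u32>, additional_updates: &mut Vec<u32>) {
// Building the additional update bumped the counter; the steps ride along in the base
// update instead, so the base update's id remains the latest.
base_updates.append(additional_updates);
*latest_monitor_update_id = base_update_id;
}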
3346 /// Public version of the below, checking relevant preconditions first.
3347 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3348 /// returns `(None, Vec::new())`.
3349 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3350 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3351 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3352 where F::Target: FeeEstimator, L::Target: Logger
3354 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3355 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3356 self.free_holding_cell_htlcs(fee_estimator, logger)
3357 } else { (None, Vec::new()) }
3360 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3361 /// for our counterparty.
3362 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3363 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3364 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3365 where F::Target: FeeEstimator, L::Target: Logger
3367 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3368 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3369 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3370 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3372 let mut monitor_update = ChannelMonitorUpdate {
3373 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3374 updates: Vec::new(),
3377 let mut htlc_updates = Vec::new();
3378 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3379 let mut update_add_count = 0;
3380 let mut update_fulfill_count = 0;
3381 let mut update_fail_count = 0;
3382 let mut htlcs_to_fail = Vec::new();
3383 for htlc_update in htlc_updates.drain(..) {
3384 // Note that this *can* fail, though it should be due to rather-rare conditions on
3385 // fee races with adding too many outputs which push our total payments just over
3386 // the limit. In case it's less rare than I anticipate, we may want to revisit
3387 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3388 // to rebalance channels.
3389 match &htlc_update {
3390 &HTLCUpdateAwaitingACK::AddHTLC {
3391 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3392 skimmed_fee_msat, blinding_point, ..
3394 match self.send_htlc(
3395 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3396 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3398 Ok(_) => update_add_count += 1,
3401 ChannelError::Ignore(ref msg) => {
3402 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3403 // If we fail to send here, then this HTLC should
3404 // be failed backwards. Failing to send here
3405 // indicates that this HTLC may keep being put back
3406 // into the holding cell without ever being
3407 // successfully forwarded/failed/fulfilled, causing
3408 // our counterparty to eventually close on us.
3409 htlcs_to_fail.push((source.clone(), *payment_hash));
3410 },
3411 _ => {
3412 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3418 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3419 // If an HTLC claim was previously added to the holding cell (via
3420 // `get_update_fulfill_htlc`, then generating the claim message itself must
3421 // not fail - any in between attempts to claim the HTLC will have resulted
3422 // in it hitting the holding cell again and we cannot change the state of a
3423 // holding cell HTLC from fulfill to anything else.
3424 let mut additional_monitor_update =
3425 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3426 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3427 { monitor_update } else { unreachable!() };
3428 update_fulfill_count += 1;
3429 monitor_update.updates.append(&mut additional_monitor_update.updates);
3431 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3432 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3433 Ok(update_fail_msg_option) => {
3434 // If an HTLC failure was previously added to the holding cell (via
3435 // `queue_fail_htlc`) then generating the fail message itself must
3436 // not fail - we should never end up in a state where we double-fail
3437 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3438 // for a full revocation before failing.
3439 debug_assert!(update_fail_msg_option.is_some());
3440 update_fail_count += 1;
3443 if let ChannelError::Ignore(_) = e {}
3444 else {
3445 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3452 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3453 return (None, htlcs_to_fail);
3455 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3456 self.send_update_fee(feerate, false, fee_estimator, logger)
3457 } else {
3458 None
3459 };
3461 let mut additional_update = self.build_commitment_no_status_check(logger);
3462 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3463 // but we want them to be strictly increasing by one, so reset it here.
3464 self.context.latest_monitor_update_id = monitor_update.update_id;
3465 monitor_update.updates.append(&mut additional_update.updates);
3467 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3468 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3469 update_add_count, update_fulfill_count, update_fail_count);
3471 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3472 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
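// Sketch (test-only, &str tags standing in for HTLCUpdateAwaitingACK variants) of
// free_holding_cell_htlcs' bookkeeping above: the queue is swapped out, each entry replayed,
// and adds/fulfills/fails counted for the log line.
#[cfg(test)]
fn example_drain_holding_cell(holding_cell: &mut Vec<&'static str>) -> (u32, u32, u32) {
let mut updates = Vec::new();
core::mem::swap(&mut updates, holding_cell);
let (mut adds, mut fulfills, mut fails) = (0, 0, 0);
for update in updates.drain(..) {
match update {
"add" => adds += 1,
"claim" => fulfills += 1,
_ => fails += 1,
}
}
(adds, fulfills, fails)
}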
3478 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3479 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3480 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3481 /// generating an appropriate error *after* the channel state has been updated based on the
3482 /// revoke_and_ack message.
3483 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3484 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3485 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3486 where F::Target: FeeEstimator, L::Target: Logger,
3488 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3489 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3491 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3492 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3494 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3495 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3498 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3500 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3501 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3502 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3506 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3507 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3508 // haven't given them a new commitment transaction to broadcast). We should probably
3509 // take advantage of this by updating our channel monitor, sending them an error, and
3510 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3511 // lot of work, and there's some chance this is all a misunderstanding anyway.
3512 // We have to do *something*, though, since our signer may get mad at us for otherwise
3513 // jumping a remote commitment number, so best to just force-close and move on.
3514 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3517 #[cfg(any(test, fuzzing))]
3519 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3520 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
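// (These cached next-commitment fee predictions exist only for test/fuzzing sanity checks;
// the revocation changes the pending-HTLC set, so they are now stale.)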
3523 match &self.context.holder_signer {
3524 ChannelSignerType::Ecdsa(ecdsa) => {
3525 ecdsa.validate_counterparty_revocation(
3526 self.context.cur_counterparty_commitment_transaction_number + 1,
3528 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3530 // TODO (taproot|arik)
3535 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3536 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
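// Persist the newly-revoked secret to the ChannelMonitor so it can build penalty
// transactions if the counterparty ever broadcasts this now-revoked state.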
3537 self.context.latest_monitor_update_id += 1;
3538 let mut monitor_update = ChannelMonitorUpdate {
3539 update_id: self.context.latest_monitor_update_id,
3540 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3541 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3542 secret: msg.per_commitment_secret,
3546 // Update state now that we've passed all the can-fail calls...
3547 // (note that we may still fail to generate the new commitment_signed message, but that's
3548 // OK, we step the channel here and *then* if the new generation fails we can fail the
3549 // channel based on that, but stepping stuff here should be safe either way.)
3550 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3551 self.context.sent_message_awaiting_response = None;
3552 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3553 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3554 self.context.cur_counterparty_commitment_transaction_number -= 1;
3556 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3557 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3560 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3561 let mut to_forward_infos = Vec::new();
3562 let mut revoked_htlcs = Vec::new();
3563 let mut finalized_claimed_htlcs = Vec::new();
3564 let mut update_fail_htlcs = Vec::new();
3565 let mut update_fail_malformed_htlcs = Vec::new();
3566 let mut require_commitment = false;
3567 let mut value_to_self_msat_diff: i64 = 0;
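// `value_to_self_msat_diff` accumulates the net msat change from HTLCs fully resolved by
// this revocation: fulfilled inbound HTLCs add to our balance, fulfilled outbound HTLCs
// subtract from it.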
3570 // Take references explicitly so that we can hold multiple references to self.context.
3571 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3572 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3573 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3575 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3576 pending_inbound_htlcs.retain(|htlc| {
3577 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3578 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3579 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3580 value_to_self_msat_diff += htlc.amount_msat as i64;
3582 *expecting_peer_commitment_signed = true;
3586 pending_outbound_htlcs.retain(|htlc| {
3587 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3588 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3589 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3590 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3592 finalized_claimed_htlcs.push(htlc.source.clone());
3593 // They fulfilled, so we sent them money
3594 value_to_self_msat_diff -= htlc.amount_msat as i64;
3599 for htlc in pending_inbound_htlcs.iter_mut() {
3600 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3602 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3606 let mut state = InboundHTLCState::Committed;
3607 mem::swap(&mut state, &mut htlc.state);
3609 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3610 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3611 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3612 require_commitment = true;
3613 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3614 match forward_info {
3615 PendingHTLCStatus::Fail(fail_msg) => {
3616 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3617 require_commitment = true;
3619 HTLCFailureMsg::Relay(msg) => {
3620 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3621 update_fail_htlcs.push(msg)
3623 HTLCFailureMsg::Malformed(msg) => {
3624 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3625 update_fail_malformed_htlcs.push(msg)
3629 PendingHTLCStatus::Forward(forward_info) => {
3630 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3631 to_forward_infos.push((forward_info, htlc.htlc_id));
3632 htlc.state = InboundHTLCState::Committed;
3638 for htlc in pending_outbound_htlcs.iter_mut() {
3639 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3640 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3641 htlc.state = OutboundHTLCState::Committed;
3642 *expecting_peer_commitment_signed = true;
3644 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3645 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3646 // Grab the preimage, if it exists, instead of cloning
3647 let mut reason = OutboundHTLCOutcome::Success(None);
3648 mem::swap(outcome, &mut reason);
3649 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3650 require_commitment = true;
3654 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
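// A pending fee update goes through the same commitment/revocation dance as HTLCs: only
// once the counterparty has revoked the prior state does the new feerate become operative.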
3656 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3657 match update_state {
3658 FeeUpdateState::Outbound => {
3659 debug_assert!(self.context.is_outbound());
3660 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3661 self.context.feerate_per_kw = feerate;
3662 self.context.pending_update_fee = None;
3663 self.context.expecting_peer_commitment_signed = true;
3665 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3666 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3667 debug_assert!(!self.context.is_outbound());
3668 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3669 require_commitment = true;
3670 self.context.feerate_per_kw = feerate;
3671 self.context.pending_update_fee = None;
3676 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3677 let release_state_str =
3678 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
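// `return_with_htlcs_to_fail!` either hands the monitor update back to the caller or, when
// updates are currently blocked (or the caller asked us to hold this one), queues it in
// `blocked_monitor_updates` so that updates are always released in order.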
3679 macro_rules! return_with_htlcs_to_fail {
3680 ($htlcs_to_fail: expr) => {
3681 if !release_monitor {
3682 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3683 update: monitor_update,
3685 return Ok(($htlcs_to_fail, None));
3687 return Ok(($htlcs_to_fail, Some(monitor_update)));
3692 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3693 // We can't actually generate a new commitment transaction (incl by freeing holding
3694 // cells) while we can't update the monitor, so we just return what we have.
3695 if require_commitment {
3696 self.context.monitor_pending_commitment_signed = true;
3697 // When the monitor updating is restored we'll call
3698 // get_last_commitment_update_for_send(), which does not update state, but we're
3699 // definitely now awaiting a remote revoke before we can step forward any more, so
3700 // set it here.
3701 let mut additional_update = self.build_commitment_no_status_check(logger);
3702 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3703 // strictly increasing by one, so decrement it here.
3704 self.context.latest_monitor_update_id = monitor_update.update_id;
3705 monitor_update.updates.append(&mut additional_update.updates);
3707 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3708 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3709 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3710 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3711 return_with_htlcs_to_fail!(Vec::new());
3714 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3715 (Some(mut additional_update), htlcs_to_fail) => {
3716 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3717 // strictly increasing by one, so decrement it here.
3718 self.context.latest_monitor_update_id = monitor_update.update_id;
3719 monitor_update.updates.append(&mut additional_update.updates);
3721 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3722 &self.context.channel_id(), release_state_str);
3724 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3725 return_with_htlcs_to_fail!(htlcs_to_fail);
3727 (None, htlcs_to_fail) => {
3728 if require_commitment {
3729 let mut additional_update = self.build_commitment_no_status_check(logger);
3731 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3732 // strictly increasing by one, so decrement it here.
3733 self.context.latest_monitor_update_id = monitor_update.update_id;
3734 monitor_update.updates.append(&mut additional_update.updates);
3736 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3737 &self.context.channel_id(),
3738 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3741 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3742 return_with_htlcs_to_fail!(htlcs_to_fail);
3744 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3745 &self.context.channel_id(), release_state_str);
3747 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3748 return_with_htlcs_to_fail!(htlcs_to_fail);
3754 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3755 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3756 /// commitment update.
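///
/// A minimal sketch of the expected calling pattern (the `channel`, `fee_estimator` and
/// `logger` bindings are illustrative, not part of this API):
///
/// ```ignore
/// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
/// // Later, when ready to generate and send the commitment update:
/// let _ = channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// ```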
3757 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3758 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3759 where F::Target: FeeEstimator, L::Target: Logger
3761 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3762 assert!(msg_opt.is_none(), "We forced holding cell?");
3765 /// Adds a pending update to this channel. See the doc for send_htlc for
3766 /// further details on the optionality of the return value.
3767 /// If our balance is too low to cover the cost of the next commitment transaction at the
3768 /// new feerate, the update is cancelled.
3770 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3771 /// [`Channel`] if `force_holding_cell` is false.
3772 fn send_update_fee<F: Deref, L: Deref>(
3773 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3774 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3775 ) -> Option<msgs::UpdateFee>
3776 where F::Target: FeeEstimator, L::Target: Logger
3778 if !self.context.is_outbound() {
3779 panic!("Cannot send fee from inbound channel");
3781 if !self.context.is_usable() {
3782 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3784 if !self.context.is_live() {
3785 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3788 // Before proposing a feerate update, check that we can actually afford the new fee.
3789 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3790 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3791 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3792 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
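// Budget for CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional (non-dust) HTLCs on top of the
// current commitment so we can still afford the new feerate if the counterparty adds HTLCs
// before it sees our update_fee.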
3793 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3794 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3795 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3796 //TODO: auto-close after a number of failures?
3797 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3801 // Note that we evaluate each pending HTLC's "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3802 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3803 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3804 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3805 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3806 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3809 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3810 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3814 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3815 force_holding_cell = true;
3818 if force_holding_cell {
3819 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3823 debug_assert!(self.context.pending_update_fee.is_none());
3824 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3826 Some(msgs::UpdateFee {
3827 channel_id: self.context.channel_id,
3832 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3833 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3834 /// resent.
3835 /// No further message handling calls may be made until a channel_reestablish dance has
3836 /// completed.
3837 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3838 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3839 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3840 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3844 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3845 // While the below code should be idempotent, it's simpler to just return early, as
3846 // redundant disconnect events can fire, though they should be rare.
3850 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3851 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3854 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3855 // will be retransmitted.
3856 self.context.last_sent_closing_fee = None;
3857 self.context.pending_counterparty_closing_signed = None;
3858 self.context.closing_fee_limits = None;
3860 let mut inbound_drop_count = 0;
3861 self.context.pending_inbound_htlcs.retain(|htlc| {
3863 InboundHTLCState::RemoteAnnounced(_) => {
3864 // They sent us an update_add_htlc but we never got the commitment_signed.
3865 // We'll tell them what commitment_signed we're expecting next and they'll drop
3866 // this HTLC accordingly
3867 inbound_drop_count += 1;
3870 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3871 // We received a commitment_signed updating this HTLC and (at least hopefully)
3872 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3873 // in response to it yet, so don't touch it.
3876 InboundHTLCState::Committed => true,
3877 InboundHTLCState::LocalRemoved(_) => {
3878 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3879 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3880 // (that we missed). Keep this around for now and if they tell us they missed
3881 // the commitment_signed we can re-transmit the update then.
3886 self.context.next_counterparty_htlc_id -= inbound_drop_count;
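// (The dropped inbound HTLCs were never committed, so the ID counter rolls back to the
// value the counterparty will resume from once we reconnect.)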
3888 if let Some((_, update_state)) = self.context.pending_update_fee {
3889 if update_state == FeeUpdateState::RemoteAnnounced {
3890 debug_assert!(!self.context.is_outbound());
3891 self.context.pending_update_fee = None;
3895 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3896 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3897 // They sent us an update to remove this but haven't yet sent the corresponding
3898 // commitment_signed, we need to move it back to Committed and they can re-send
3899 // the update upon reconnection.
3900 htlc.state = OutboundHTLCState::Committed;
3904 self.context.sent_message_awaiting_response = None;
3906 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3907 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3911 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3912 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3913 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3914 /// update completes (potentially immediately).
3915 /// The messages which were generated with the monitor update must *not* have been sent to the
3916 /// remote end, and must instead have been dropped. They will be regenerated when
3917 /// [`Self::monitor_updating_restored`] is called.
3919 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3920 /// [`chain::Watch`]: crate::chain::Watch
3921 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3922 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3923 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3924 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3925 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3927 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3928 self.context.monitor_pending_commitment_signed |= resend_commitment;
3929 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3930 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3931 self.context.monitor_pending_failures.append(&mut pending_fails);
3932 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3933 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3936 /// Indicates that the latest ChannelMonitor update has been committed by the client
3937 /// successfully and we should restore normal operation. Returns messages which should be sent
3938 /// to the remote side.
3939 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3940 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3941 user_config: &UserConfig, best_block_height: u32
3942 ) -> MonitorRestoreUpdates
3945 NS::Target: NodeSigner
3947 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3948 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3950 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3951 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3952 // first received the funding_signed.
3953 let mut funding_broadcastable =
3954 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3955 self.context.funding_transaction.take()
3957 // That said, if the funding transaction is already confirmed (ie we're active with a
3958 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3959 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3960 funding_broadcastable = None;
3963 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3964 // (and we assume the user never directly broadcasts the funding transaction and waits for
3965 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3966 // * an inbound channel that failed to persist the monitor on funding_created and we got
3967 // the funding transaction confirmed before the monitor was persisted, or
3968 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3969 let channel_ready = if self.context.monitor_pending_channel_ready {
3970 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3971 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3972 self.context.monitor_pending_channel_ready = false;
3973 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3974 Some(msgs::ChannelReady {
3975 channel_id: self.context.channel_id(),
3976 next_per_commitment_point,
3977 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3981 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
3983 let mut accepted_htlcs = Vec::new();
3984 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3985 let mut failed_htlcs = Vec::new();
3986 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3987 let mut finalized_claimed_htlcs = Vec::new();
3988 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
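// Everything queued while the monitor update was in flight is drained here; the caller
// (the ChannelManager) is responsible for replaying these HTLC resolutions.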
3990 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3991 self.context.monitor_pending_revoke_and_ack = false;
3992 self.context.monitor_pending_commitment_signed = false;
3993 return MonitorRestoreUpdates {
3994 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3995 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
3999 let raa = if self.context.monitor_pending_revoke_and_ack {
4000 Some(self.get_last_revoke_and_ack())
4002 let commitment_update = if self.context.monitor_pending_commitment_signed {
4003 self.get_last_commitment_update_for_send(logger).ok()
4005 if commitment_update.is_some() {
4006 self.mark_awaiting_response();
4009 self.context.monitor_pending_revoke_and_ack = false;
4010 self.context.monitor_pending_commitment_signed = false;
4011 let order = self.context.resend_order.clone();
4012 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4013 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4014 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4015 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4016 MonitorRestoreUpdates {
4017 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4021 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4022 where F::Target: FeeEstimator, L::Target: Logger
4024 if self.context.is_outbound() {
4025 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4027 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4028 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4030 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
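// The new feerate is merely pending (RemoteAnnounced) at this point; it only takes effect
// once the usual commitment_signed/revoke_and_ack dance completes.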
4032 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4033 self.context.update_time_counter += 1;
4034 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4035 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4036 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4037 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4038 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4039 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4040 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4041 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4042 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4043 msg.feerate_per_kw, holder_tx_dust_exposure)));
4045 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4046 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4047 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4053 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4054 /// blocked.
4056 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4057 let commitment_update = if self.context.signer_pending_commitment_update {
4058 self.get_last_commitment_update_for_send(logger).ok()
4060 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4061 self.context.get_funding_signed_msg(logger).1
4063 let channel_ready = if funding_signed.is_some() {
4064 self.check_get_channel_ready(0)
4066 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4067 self.context.get_funding_created_msg(logger)
4070 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4071 if commitment_update.is_some() { "a" } else { "no" },
4072 if funding_signed.is_some() { "a" } else { "no" },
4073 if funding_created.is_some() { "a" } else { "no" },
4074 if channel_ready.is_some() { "a" } else { "no" });
4076 SignerResumeUpdates {
4084 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
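// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER; releasing the secret at
// `cur + 2` re-reveals what our most recent revoke_and_ack disclosed, so the message built
// below is a regeneration of that RAA rather than a new revocation.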
4085 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4086 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4087 msgs::RevokeAndACK {
4088 channel_id: self.context.channel_id,
4089 per_commitment_secret,
4090 next_per_commitment_point,
4092 next_local_nonce: None,
4096 /// Gets the last commitment update for immediate sending to our peer.
4097 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4098 let mut update_add_htlcs = Vec::new();
4099 let mut update_fulfill_htlcs = Vec::new();
4100 let mut update_fail_htlcs = Vec::new();
4101 let mut update_fail_malformed_htlcs = Vec::new();
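// Rather than storing the original update_* messages, we regenerate them from the current
// HTLC state: anything still in a LocalAnnounced or LocalRemoved state has not yet been
// irrevocably committed and so must be re-sent.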
4103 for htlc in self.context.pending_outbound_htlcs.iter() {
4104 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4105 update_add_htlcs.push(msgs::UpdateAddHTLC {
4106 channel_id: self.context.channel_id(),
4107 htlc_id: htlc.htlc_id,
4108 amount_msat: htlc.amount_msat,
4109 payment_hash: htlc.payment_hash,
4110 cltv_expiry: htlc.cltv_expiry,
4111 onion_routing_packet: (**onion_packet).clone(),
4112 skimmed_fee_msat: htlc.skimmed_fee_msat,
4113 blinding_point: htlc.blinding_point,
4118 for htlc in self.context.pending_inbound_htlcs.iter() {
4119 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4121 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4122 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4123 channel_id: self.context.channel_id(),
4124 htlc_id: htlc.htlc_id,
4125 reason: err_packet.clone()
4128 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4129 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4130 channel_id: self.context.channel_id(),
4131 htlc_id: htlc.htlc_id,
4132 sha256_of_onion: sha256_of_onion.clone(),
4133 failure_code: failure_code.clone(),
4136 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4137 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4138 channel_id: self.context.channel_id(),
4139 htlc_id: htlc.htlc_id,
4140 payment_preimage: payment_preimage.clone(),
4147 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4148 Some(msgs::UpdateFee {
4149 channel_id: self.context.channel_id(),
4150 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4154 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4155 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4156 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4157 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4158 if self.context.signer_pending_commitment_update {
4159 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4160 self.context.signer_pending_commitment_update = false;
4164 if !self.context.signer_pending_commitment_update {
4165 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4166 self.context.signer_pending_commitment_update = true;
4170 Ok(msgs::CommitmentUpdate {
4171 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4176 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4177 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4178 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4179 assert!(self.context.shutdown_scriptpubkey.is_some());
4180 Some(msgs::Shutdown {
4181 channel_id: self.context.channel_id,
4182 scriptpubkey: self.get_closing_scriptpubkey(),
4187 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4188 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4190 /// Some links printed in log lines are included here to check them during build (when run with
4191 /// `cargo doc --document-private-items`):
4192 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4193 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4194 pub fn channel_reestablish<L: Deref, NS: Deref>(
4195 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4196 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4197 ) -> Result<ReestablishResponses, ChannelError>
4200 NS::Target: NodeSigner
4202 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4203 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4204 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4205 // just close here instead of trying to recover.
4206 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4209 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4210 msg.next_local_commitment_number == 0 {
4211 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4214 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
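// (`our_commitment_transaction` is the count-up form of our countdown-style holder
// commitment number, matching the numbering channel_reestablish uses on the wire.)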
4215 if msg.next_remote_commitment_number > 0 {
4216 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4217 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4218 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4219 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4220 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4222 if msg.next_remote_commitment_number > our_commitment_transaction {
4223 macro_rules! log_and_panic {
4224 ($err_msg: expr) => {
4225 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4226 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4229 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4230 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4231 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4232 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4233 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4234 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4235 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4236 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4240 // Before we change the state of the channel, we check if the peer is sending a very old
4241 // commitment transaction number; if so, we send a warning message.
4242 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4243 return Err(ChannelError::Warn(format!(
4244 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4245 msg.next_remote_commitment_number,
4246 our_commitment_transaction
4250 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4251 // remaining cases either succeed or ErrorMessage-fail).
4252 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4253 self.context.sent_message_awaiting_response = None;
4255 let shutdown_msg = self.get_outbound_shutdown();
4257 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4259 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4260 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4261 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4262 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4263 if msg.next_remote_commitment_number != 0 {
4264 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4266 // Short circuit the whole handler as there is nothing we can resend them
4267 return Ok(ReestablishResponses {
4268 channel_ready: None,
4269 raa: None, commitment_update: None,
4270 order: RAACommitmentOrder::CommitmentFirst,
4271 shutdown_msg, announcement_sigs,
4275 // We have OurChannelReady set!
4276 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4277 return Ok(ReestablishResponses {
4278 channel_ready: Some(msgs::ChannelReady {
4279 channel_id: self.context.channel_id(),
4280 next_per_commitment_point,
4281 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4283 raa: None, commitment_update: None,
4284 order: RAACommitmentOrder::CommitmentFirst,
4285 shutdown_msg, announcement_sigs,
4289 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4290 // Remote isn't waiting on any RevokeAndACK from us!
4291 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4293 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4294 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4295 self.context.monitor_pending_revoke_and_ack = true;
4298 Some(self.get_last_revoke_and_ack())
4301 debug_assert!(false, "All values should have been handled in the four cases above");
4302 return Err(ChannelError::Close(format!(
4303 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4304 msg.next_remote_commitment_number,
4305 our_commitment_transaction
4309 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4310 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4311 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4312 // the corresponding revoke_and_ack back yet.
4313 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4314 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4315 self.mark_awaiting_response();
4317 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4319 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4320 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4321 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4322 Some(msgs::ChannelReady {
4323 channel_id: self.context.channel_id(),
4324 next_per_commitment_point,
4325 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4329 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4330 if required_revoke.is_some() {
4331 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4333 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4336 Ok(ReestablishResponses {
4337 channel_ready, shutdown_msg, announcement_sigs,
4338 raa: required_revoke,
4339 commitment_update: None,
4340 order: self.context.resend_order.clone(),
4342 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4343 if required_revoke.is_some() {
4344 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4346 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4349 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4350 self.context.monitor_pending_commitment_signed = true;
4351 Ok(ReestablishResponses {
4352 channel_ready, shutdown_msg, announcement_sigs,
4353 commitment_update: None, raa: None,
4354 order: self.context.resend_order.clone(),
4357 Ok(ReestablishResponses {
4358 channel_ready, shutdown_msg, announcement_sigs,
4359 raa: required_revoke,
4360 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4361 order: self.context.resend_order.clone(),
4364 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4365 Err(ChannelError::Close(format!(
4366 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4367 msg.next_local_commitment_number,
4368 next_counterparty_commitment_number,
4371 Err(ChannelError::Close(format!(
4372 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4373 msg.next_local_commitment_number,
4374 next_counterparty_commitment_number,
4379 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4380 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4381 /// at which point they will be recalculated.
4382 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4384 where F::Target: FeeEstimator
4386 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4388 // Propose a range from our current Background feerate to our Normal feerate plus our
4389 // force_close_avoidance_max_fee_satoshis.
4390 // If we fail to come to consensus, we'll have to force-close.
4391 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4392 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4393 // that we don't expect to need fee bumping
4394 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
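// If we're not the funder, the closing fee comes out of the funder's balance, so there is
// no reason to cap the maximum feerate we'll accept.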
4395 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4397 // The spec requires that (when the channel does not have anchors) we only send absolute
4398 // channel fees no greater than the absolute channel fee on the current commitment
4399 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't a
4400 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4401 // some force-closure by old nodes, but we wanted to close the channel anyway.
4403 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4404 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4405 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4406 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4409 // Note that technically we could end up with a lower minimum fee if one side's balance is
4410 // below our dust limit, causing the output to disappear. We don't bother handling this
4411 // case, however, as this should only happen if a channel is closed before any (material)
4412 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4413 // come to consensus with our counterparty on appropriate fees, however it should be a
4414 // relatively rare case. We can revisit this later, though note that in order to determine
4415 // if the funder's output is dust we have to know the absolute fee we're going to use.
4416 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4417 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4418 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4419 // We always add force_close_avoidance_max_fee_satoshis to our normal
4420 // feerate-calculated fee, but allow the max to be overridden if we're using a
4421 // target feerate-calculated fee.
4422 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4423 proposed_max_feerate as u64 * tx_weight / 1000)
4425 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4428 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4429 self.context.closing_fee_limits.clone().unwrap()
4432 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4433 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4434 /// this point if we're the funder we should send the initial closing_signed, and in any case
4435 /// shutdown should complete within a reasonable timeframe.
4436 fn closing_negotiation_ready(&self) -> bool {
4437 self.context.closing_negotiation_ready()
4440 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4441 /// an Err if no progress is being made and the channel should be force-closed instead.
4442 /// Should be called on a one-minute timer.
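///
/// An illustrative (not prescriptive) timer-driven use:
///
/// ```ignore
/// if channel.timer_check_closing_negotiation_progress().is_err() {
/// 	// Negotiation has stalled; the caller should force-close the channel.
/// }
/// ```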
4443 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4444 if self.closing_negotiation_ready() {
4445 if self.context.closing_signed_in_flight {
4446 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4448 self.context.closing_signed_in_flight = true;
4454 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4455 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4456 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4457 where F::Target: FeeEstimator, L::Target: Logger
4459 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4460 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4461 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4462 // that closing_negotiation_ready checks this case (as well as a few others).
4463 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4464 return Ok((None, None, None));
4467 if !self.context.is_outbound() {
4468 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4469 return self.closing_signed(fee_estimator, &msg);
4471 return Ok((None, None, None));
4474 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4475 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4476 if self.context.expecting_peer_commitment_signed {
4477 return Ok((None, None, None));
4480 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4482 assert!(self.context.shutdown_scriptpubkey.is_some());
4483 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4484 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4485 our_min_fee, our_max_fee, total_fee_satoshis);
4487 match &self.context.holder_signer {
4488 ChannelSignerType::Ecdsa(ecdsa) => {
4490 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4491 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4493 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4494 Ok((Some(msgs::ClosingSigned {
4495 channel_id: self.context.channel_id,
4496 fee_satoshis: total_fee_satoshis,
4498 fee_range: Some(msgs::ClosingSignedFeeRange {
4499 min_fee_satoshis: our_min_fee,
4500 max_fee_satoshis: our_max_fee,
4504 // TODO (taproot|arik)
4510 // Marks a channel as waiting for a response from the counterparty. If it's not received
4511 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4512 // a reconnection.
4513 fn mark_awaiting_response(&mut self) {
4514 self.context.sent_message_awaiting_response = Some(0);
4517 /// Determines whether we should disconnect the counterparty due to not receiving a response
4518 /// within our expected timeframe.
4520 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4521 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4522 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4525 // Don't disconnect when we're not waiting on a response.
4528 *ticks_elapsed += 1;
4529 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4532 pub fn shutdown(
4533 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4534 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4536 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4537 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4539 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4540 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4541 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4542 // can do that via error message without getting a connection fail anyway...
4543 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4545 for htlc in self.context.pending_inbound_htlcs.iter() {
4546 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4547 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4550 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4552 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4553 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4556 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4557 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4558 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4561 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4564 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4565 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4566 // any further commitment updates after we set LocalShutdownSent.
4567 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4569 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4572 assert!(send_shutdown);
4573 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4574 Ok(scriptpubkey) => scriptpubkey,
4575 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4577 if !shutdown_scriptpubkey.is_compatible(their_features) {
4578 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4580 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4585 // From here on out, we may not fail!
4587 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4588 self.context.update_time_counter += 1;
4590 let monitor_update = if update_shutdown_script {
4591 self.context.latest_monitor_update_id += 1;
4592 let monitor_update = ChannelMonitorUpdate {
4593 update_id: self.context.latest_monitor_update_id,
4594 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4595 scriptpubkey: self.get_closing_scriptpubkey(),
4598 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4599 self.push_ret_blockable_mon_update(monitor_update)
4601 let shutdown = if send_shutdown {
4602 Some(msgs::Shutdown {
4603 channel_id: self.context.channel_id,
4604 scriptpubkey: self.get_closing_scriptpubkey(),
4608 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4609 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4610 // cell HTLCs and return them to fail the payment.
4611 self.context.holding_cell_update_fee = None;
4612 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4613 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4615 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4616 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4623 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4624 self.context.update_time_counter += 1;
4626 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4629 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4630 let mut tx = closing_tx.trust().built_transaction().clone();
4632 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4634 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4635 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4636 let mut holder_sig = sig.serialize_der().to_vec();
4637 holder_sig.push(EcdsaSighashType::All as u8);
4638 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4639 cp_sig.push(EcdsaSighashType::All as u8);
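// BOLT 3 places the lexicographically lesser funding pubkey first in the 2-of-2
// redeemscript, so the witness signatures must be pushed in the matching order.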
4640 if funding_key[..] < counterparty_funding_key[..] {
4641 tx.input[0].witness.push(holder_sig);
4642 tx.input[0].witness.push(cp_sig);
4644 tx.input[0].witness.push(cp_sig);
4645 tx.input[0].witness.push(holder_sig);
4648 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());

	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_e) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};

		for outp in closing_tx.trust().built_transaction().output.iter() {
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				let shutdown_result = ShutdownResult {
					monitor_update: None,
					dropped_outbound_htlcs: Vec::new(),
					unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
				};
				let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete as u32;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx), Some(shutdown_result)));
			}
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
						let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
							let shutdown_result = ShutdownResult {
								monitor_update: None,
								dropped_outbound_htlcs: Vec::new(),
								unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
							};
							self.context.channel_state = ChannelState::ShutdownComplete as u32;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							(Some(tx), Some(shutdown_result))
						} else {
							(None, None)
						};

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx, shutdown_result))
					},
					// TODO (taproot|arik)
					#[cfg(taproot)]
					_ => todo!()
				}
			}
		}

		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
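
	// Sketch of the modern fee negotiation (an illustration, not part of the
	// original source): when `fee_range` is present, the side that does not pay the
	// fee picks the highest fee in the overlap of the two ranges, so negotiation
	// converges in a single round-trip:
	//
	//     fn pick_closing_fee(our_min: u64, our_max: u64, their_max: u64) -> Option<u64> {
	//         if their_max < our_min { return None; } // no overlap - warn rather than force-close
	//         Some(core::cmp::min(their_max, our_max))
	//     }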

	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward
		{
			return Err((
				"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}
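
	// Worked example (illustrative numbers, not from the original source): with
	// forwarding_fee_base_msat = 1_000 and forwarding_fee_proportional_millionths = 100,
	// forwarding amt_to_forward = 1_000_000 msat requires the inbound HTLC to carry
	// at least the forwarded amount plus the fee:
	//
	//     let fee = 1_000u64 + 1_000_000u64 * 100 / 1_000_000; // 1_100 msat
	//     assert!(1_001_100u64 >= 1_000_000 + fee); // minimum inbound amount_msat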

	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}

	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
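
	// Illustrative mapping (a sketch, not from the original source): internally we
	// track commitment transactions by a counter that starts at
	// INITIAL_COMMITMENT_NUMBER (2^48 - 1) and counts down, while the protocol-level
	// commitment number starts at 0 and counts up, so conversion is a subtraction:
	//
	//     let protocol_commitment_number =
	//         INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number;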

	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}

	#[cfg(test)]
	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}

	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
	}

	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}

	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}

	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
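
	// Usage sketch (hypothetical driver code, not part of the original source): a
	// caller could drain blocked updates once the blocking dependency clears,
	// persisting each in order:
	//
	//     while let Some((update, _more_pending)) = chan.unblock_next_blocked_monitor_update() {
	//         persist_monitor_update(&update); // hypothetical persistence hook
	//     }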

	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if self.context.channel_state &
			!(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
			== ChannelState::FundingSent as u32 {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// FundingSent set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}

	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		(self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		(self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		(self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
			assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
			true
		} else { false }
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}

	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
			self.context.channel_state |= ChannelState::OurChannelReady as u32;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
			self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
			self.context.update_time_counter += 1;
			true
		} else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state);
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
				if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}

	/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
	/// In the first case, we store the confirmation height and calculate the short channel id.
	/// In the second, we simply return an Err indicating we need to be force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							};
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
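
	// Illustrative detail (not from the original source): a short channel id packs
	// the funding confirmation into one u64 - 24 bits of block height, 24 bits of
	// transaction index within the block, and 16 bits of output index:
	//
	//     let scid = (height as u64) << 40 | (index_in_block as u64) << 16 | (txo_idx as u64);
	//
	// which is why scid_from_parts fails on heights or transaction indexes of
	// 2^24 (~16.7 million) or more, or output indexes of 2^16 (65536) or more.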

	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}

	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}

		let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
		if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
			(non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
			assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}

	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
			// We use the time field to bump the current time we set on channel updates if it's
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}

	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
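
	// Illustrative note (not from the original source): BOLT #7 requires node_id_1
	// to be the numerically-lesser of the two node ids, so both peers build the same
	// announcement deterministically; the `were_node_one` flag above implements that:
	//
	//     let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();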

	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}

	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}

	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}

	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}

	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0;32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
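
	// Worked example (illustrative, not from the original source): immediately after
	// the initial commitment dance, cur_holder_commitment_transaction_number is
	// INITIAL_COMMITMENT_NUMBER - 1, so the reestablish message carries
	//
	//     next_local_commitment_number = INITIAL_COMMITMENT_NUMBER - (INITIAL_COMMITMENT_NUMBER - 1); // == 1
	//
	// i.e. we have seen commitment_signed number 0 and expect number 1 next, matching
	// the BOLT #2 `next_commitment_number` semantics.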

	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, blinding_point, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}

	/// Adds a pending outbound HTLC to this channel; note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   awaiting ACK.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
			// Note that this should never really happen: if we're !is_live(), receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
				else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
				else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
				blinding_point,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			blinding_point,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
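
	// Decision sketch (an illustration, not part of the original source): an HTLC is
	// routed into the holding cell rather than straight onto the wire whenever we're
	// mid-commitment-dance or a monitor write is still outstanding:
	//
	//     let hold = force_holding_cell
	//         || channel_state & (ChannelState::AwaitingRemoteRevoke as u32
	//             | ChannelState::MonitorUpdateInProgress as u32) != 0;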

	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
		// fail to generate this, we still are at least at a position where upgrading their status
		// is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}]
		};
		self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
		monitor_update
	}

	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}

	/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
	/// generation when we shouldn't change HTLC/channel state.
	fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
		// Get the fee tests from `build_commitment_no_state_update`
		#[cfg(any(test, fuzzing))]
		self.build_commitment_no_state_update(logger);

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let (signature, htlc_signatures);

				{
					let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
					for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
						htlcs.push(htlc);
					}

					let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
					signature = res.0;
					htlc_signatures = res.1;

					log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
						encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
						&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
						log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

					for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
						log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
							encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
							encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
							log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
							log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
					}
				}

				Ok((msgs::CommitmentSigned {
					channel_id: self.context.channel_id,
					signature,
					htlc_signatures,
					#[cfg(taproot)]
					partial_signature_with_nonce: None,
				}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}

	/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
	/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
	///
	/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
	/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
	pub fn send_htlc_and_commit<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
		source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
			onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
		if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
		match send_res? {
			Some(_) => {
				let monitor_update = self.build_commitment_no_status_check(logger);
				self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
				Ok(self.push_ret_blockable_mon_update(monitor_update))
			},
			None => Ok(None)
		}
	}
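
	// Usage sketch (hypothetical caller, not from the original source): `Ok(None)`
	// means nothing needs persisting yet, either because the HTLC landed in the
	// holding cell or because the resulting update was queued as blocked:
	//
	//     match chan.send_htlc_and_commit(amt_msat, hash, cltv, source, onion, None, &fee_est, &logger)? {
	//         Some(monitor_update) => { /* persist, then send the commitment_signed */ },
	//         None => { /* HTLC held for later; no commitment update generated */ },
	//     }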
5833 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5835 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5836 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5837 fee_base_msat: msg.contents.fee_base_msat,
5838 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5839 cltv_expiry_delta: msg.contents.cltv_expiry_delta
});
5841 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
if did_change {
5843 self.context.counterparty_forwarding_info = new_forwarding_info;
}
Ok(did_change)
5849 /// Begins the shutdown process, getting a message for the remote peer and returning all
5850 /// holding cell HTLCs for payment failure.
5852 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
5853 /// [`ChannelMonitorUpdate`] will be returned.
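///
/// A minimal sketch, assuming `chan` is funded and all pending HTLC events have been
/// processed; `signer_provider` and `their_features` come from the surrounding node
/// plumbing:
///
/// ```ignore
/// let (shutdown_msg, monitor_update, dropped_htlcs, shutdown_result) =
///     chan.get_shutdown(&signer_provider, &their_features, None, None)?;
/// // Send `shutdown_msg` to the peer and fail `dropped_htlcs` back to their sources.
/// ```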
5854 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5855 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5856 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
5858 for htlc in self.context.pending_outbound_htlcs.iter() {
5859 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5860 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5863 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5864 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5865 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5867 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5868 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5871 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5872 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5874 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5875 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5876 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5879 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5880 // script is set, we just force-close and call it a day.
5881 let mut chan_closed = false;
5882 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
chan_closed = true;
}
5886 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5888 None if !chan_closed => {
5889 // use override shutdown script if provided
5890 let shutdown_scriptpubkey = match override_shutdown_script {
5891 Some(script) => script,
None => {
5893 // otherwise, use the shutdown scriptpubkey provided by the signer
5894 match signer_provider.get_shutdown_scriptpubkey() {
5895 Ok(scriptpubkey) => scriptpubkey,
5896 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5900 if !shutdown_scriptpubkey.is_compatible(their_features) {
5901 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5903 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5909 // From here on out, we may not fail!
5910 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
5911 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5912 let shutdown_result = ShutdownResult {
5913 monitor_update: None,
5914 dropped_outbound_htlcs: Vec::new(),
5915 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5917 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5918 Some(shutdown_result)
} else {
5920 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
None
};
5923 self.context.update_time_counter += 1;
5925 let monitor_update = if update_shutdown_script {
5926 self.context.latest_monitor_update_id += 1;
5927 let monitor_update = ChannelMonitorUpdate {
5928 update_id: self.context.latest_monitor_update_id,
5929 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5930 scriptpubkey: self.get_closing_scriptpubkey(),
}],
};
5933 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5934 self.push_ret_blockable_mon_update(monitor_update)
} else { None };
5936 let shutdown = msgs::Shutdown {
5937 channel_id: self.context.channel_id,
5938 scriptpubkey: self.get_closing_scriptpubkey(),
5941 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5942 // our shutdown until we've committed all of the pending changes.
5943 self.context.holding_cell_update_fee = None;
5944 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5945 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
match htlc_update {
5947 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5948 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
false
},
_ => true
}
});
5955 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5956 "we can't both complete shutdown and return a monitor update");
5958 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
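/// Returns the `(source, payment_hash)` pairs for all outbound HTLCs currently in flight on
/// this channel, including any still queued in the holding cell.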
5961 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5962 self.context.holding_cell_htlc_updates.iter()
5963 .flat_map(|htlc_update| {
match htlc_update {
5965 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5966 => Some((source, payment_hash)),
_ => None,
}
})
5970 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
5974 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5975 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
5976 pub context: ChannelContext<SP>,
5977 pub unfunded_context: UnfundedChannelContext,
5980 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
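/// Creates a new outbound channel, validating the requested value, push amount, and local
/// configuration before building any channel state.
///
/// A minimal sketch, assuming the caller (normally the `ChannelManager`) supplies the fee
/// estimator, entropy source, and signer provider; the literal values are illustrative:
///
/// ```ignore
/// let chan = OutboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &their_features, 100_000 /* sats */, 0 /* push_msat */, user_id, &config,
///     best_block_height, outbound_scid_alias, None /* temporary_channel_id */,
/// )?;
/// ```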
5981 pub fn new<ES: Deref, F: Deref>(
5982 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5983 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5984 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
5985 ) -> Result<OutboundV1Channel<SP>, APIError>
5986 where ES::Target: EntropySource,
5987 F::Target: FeeEstimator
5989 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
5990 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
5991 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
5992 let pubkeys = holder_signer.pubkeys().clone();
5994 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
5995 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
5997 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
5998 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6000 let channel_value_msat = channel_value_satoshis * 1000;
6001 if push_msat > channel_value_msat {
6002 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6004 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6005 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6007 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6008 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6009 // This is a protocol-level safety check; it should never trigger because
6010 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` already enforces a larger minimum.
6011 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve ({}) is below the implementation dust limit", holder_selected_channel_reserve_satoshis) });
6014 let channel_type = Self::get_initial_channel_type(&config, their_features);
6015 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6017 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6018 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
} else {
6020 (ConfirmationTarget::NonAnchorChannelFee, 0)
};
6022 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6024 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6025 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6026 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6027 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6030 let mut secp_ctx = Secp256k1::new();
6031 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6033 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6034 match signer_provider.get_shutdown_scriptpubkey() {
6035 Ok(scriptpubkey) => Some(scriptpubkey),
6036 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
}
} else { None };
6040 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6041 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6042 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6046 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6047 Ok(script) => script,
6048 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6051 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6054 context: ChannelContext {
6057 config: LegacyChannelConfig {
6058 options: config.channel_config.clone(),
6059 announced_channel: config.channel_handshake_config.announced_channel,
6060 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6065 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6067 channel_id: temporary_channel_id,
6068 temporary_channel_id: Some(temporary_channel_id),
6069 channel_state: ChannelState::OurInitSent as u32,
6070 announcement_sigs_state: AnnouncementSigsState::NotSent,
6072 channel_value_satoshis,
6074 latest_monitor_update_id: 0,
6076 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6077 shutdown_scriptpubkey,
6080 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6081 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6084 pending_inbound_htlcs: Vec::new(),
6085 pending_outbound_htlcs: Vec::new(),
6086 holding_cell_htlc_updates: Vec::new(),
6087 pending_update_fee: None,
6088 holding_cell_update_fee: None,
6089 next_holder_htlc_id: 0,
6090 next_counterparty_htlc_id: 0,
6091 update_time_counter: 1,
6093 resend_order: RAACommitmentOrder::CommitmentFirst,
6095 monitor_pending_channel_ready: false,
6096 monitor_pending_revoke_and_ack: false,
6097 monitor_pending_commitment_signed: false,
6098 monitor_pending_forwards: Vec::new(),
6099 monitor_pending_failures: Vec::new(),
6100 monitor_pending_finalized_fulfills: Vec::new(),
6102 signer_pending_commitment_update: false,
6103 signer_pending_funding: false,
6105 #[cfg(debug_assertions)]
6106 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6107 #[cfg(debug_assertions)]
6108 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6110 last_sent_closing_fee: None,
6111 pending_counterparty_closing_signed: None,
6112 expecting_peer_commitment_signed: false,
6113 closing_fee_limits: None,
6114 target_closing_feerate_sats_per_kw: None,
6116 funding_tx_confirmed_in: None,
6117 funding_tx_confirmation_height: 0,
6118 short_channel_id: None,
6119 channel_creation_height: current_chain_height,
6121 feerate_per_kw: commitment_feerate,
6122 counterparty_dust_limit_satoshis: 0,
6123 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6124 counterparty_max_htlc_value_in_flight_msat: 0,
6125 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6126 counterparty_selected_channel_reserve_satoshis: None, // Filled in when we receive accept_channel
6127 holder_selected_channel_reserve_satoshis,
6128 counterparty_htlc_minimum_msat: 0,
6129 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6130 counterparty_max_accepted_htlcs: 0,
6131 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6132 minimum_depth: None, // Filled in when we receive accept_channel
6134 counterparty_forwarding_info: None,
6136 channel_transaction_parameters: ChannelTransactionParameters {
6137 holder_pubkeys: pubkeys,
6138 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6139 is_outbound_from_holder: true,
6140 counterparty_parameters: None,
6141 funding_outpoint: None,
6142 channel_type_features: channel_type.clone()
6144 funding_transaction: None,
6145 is_batch_funding: None,
6147 counterparty_cur_commitment_point: None,
6148 counterparty_prev_commitment_point: None,
6149 counterparty_node_id,
6151 counterparty_shutdown_scriptpubkey: None,
6153 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6155 channel_update_status: ChannelUpdateStatus::Enabled,
6156 closing_signed_in_flight: false,
6158 announcement_sigs: None,
6160 #[cfg(any(test, fuzzing))]
6161 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6162 #[cfg(any(test, fuzzing))]
6163 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6165 workaround_lnd_bug_4006: None,
6166 sent_message_awaiting_response: None,
6168 latest_inbound_scid_alias: None,
6169 outbound_scid_alias,
6171 channel_pending_event_emitted: false,
6172 channel_ready_event_emitted: false,
6174 #[cfg(any(test, fuzzing))]
6175 historical_inbound_htlc_fulfills: HashSet::new(),
6180 blocked_monitor_updates: Vec::new(),
6182 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6186 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6187 /// a funding_created message for the remote peer.
6188 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6189 /// or if called on an inbound channel.
6190 /// Note that channel_id changes during this call!
6191 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6192 /// If an Err is returned, it is a ChannelError::Close.
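///
/// A minimal sketch, assuming the handshake just completed and `funding_tx`/`funding_txo`
/// identify the (still unbroadcast!) funding output:
///
/// ```ignore
/// let (chan, funding_created) = chan
///     .get_funding_created(funding_tx, funding_txo, false /* is_batch_funding */, &logger)
///     .map_err(|(_chan, e)| e)?;
/// // Send `funding_created` (if `Some`) to the peer; broadcast only after `funding_signed`.
/// ```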
6193 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6194 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6195 if !self.context.is_outbound() {
6196 panic!("Tried to create outbound funding_created message on an inbound channel!");
6198 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6199 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6201 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6202 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6203 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6204 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6207 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6208 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6210 // Now that we're past error-generating stuff, update our local state:
6212 self.context.channel_state = ChannelState::FundingCreated as u32;
6213 self.context.channel_id = funding_txo.to_channel_id();
6215 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6216 // We can skip this if it is a zero-conf channel.
6217 if funding_transaction.is_coin_base() &&
6218 self.context.minimum_depth.unwrap_or(0) > 0 &&
6219 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6220 self.context.minimum_depth = Some(COINBASE_MATURITY);
6223 self.context.funding_transaction = Some(funding_transaction);
6224 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6226 let funding_created = self.context.get_funding_created_msg(logger);
6227 if funding_created.is_none() {
6228 if !self.context.signer_pending_funding {
6229 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6230 self.context.signer_pending_funding = true;
6234 let channel = Channel {
6235 context: self.context,
6238 Ok((channel, funding_created))
6241 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6242 // The default channel type (ie the first one we try) depends on whether the channel is
6243 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6244 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6245 // with no other changes, and fall back to `only_static_remotekey`.
6246 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6247 if !config.channel_handshake_config.announced_channel &&
6248 config.channel_handshake_config.negotiate_scid_privacy &&
6249 their_features.supports_scid_privacy() {
6250 ret.set_scid_privacy_required();
6253 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6254 // set it now. If they don't understand it, we'll fall back to our default of
6255 // `only_static_remotekey`.
6256 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6257 their_features.supports_anchors_zero_fee_htlc_tx() {
6258 ret.set_anchors_zero_fee_htlc_tx_required();
6264 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6265 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6266 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
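///
/// A minimal sketch of the retry flow, assuming the peer just sent an error message for
/// this channel:
///
/// ```ignore
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(open_channel) => { /* re-send `open_channel` with the downgraded channel type */ },
///     Err(()) => { /* nothing left to downgrade; fail the channel */ },
/// }
/// ```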
6267 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6268 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6269 ) -> Result<msgs::OpenChannel, ()>
6271 F::Target: FeeEstimator
6273 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6274 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6275 // We've exhausted our options
return Err(());
}
6278 // We support opening a few different types of channels. Try removing our additional
6279 // features one by one until we've either arrived at our default or the counterparty has
6282 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6283 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6284 // checks whether the counterparty supports every feature, this would only happen if the
6285 // counterparty is advertising the feature, but rejecting channels proposing the feature for
// some reason.
6287 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6288 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6289 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6290 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6291 } else if self.context.channel_type.supports_scid_privacy() {
6292 self.context.channel_type.clear_scid_privacy();
6294 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6296 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6297 Ok(self.get_open_channel(chain_hash))
6300 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6301 if !self.context.is_outbound() {
6302 panic!("Tried to open a channel for an inbound channel?");
6304 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6305 panic!("Cannot generate an open_channel after we've moved forward");
6308 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6309 panic!("Tried to send an open_channel for a channel that has already advanced");
6312 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6313 let keys = self.context.get_holder_pubkeys();
6317 temporary_channel_id: self.context.channel_id,
6318 funding_satoshis: self.context.channel_value_satoshis,
6319 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6320 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6321 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6322 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6323 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6324 feerate_per_kw: self.context.feerate_per_kw as u32,
6325 to_self_delay: self.context.get_holder_selected_contest_delay(),
6326 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6327 funding_pubkey: keys.funding_pubkey,
6328 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6329 payment_point: keys.payment_point,
6330 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6331 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6332 first_per_commitment_point,
6333 channel_flags: if self.context.config.announced_channel {1} else {0},
6334 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6335 Some(script) => script.clone().into_inner(),
6336 None => Builder::new().into_script(),
6338 channel_type: Some(self.context.channel_type.clone()),
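/// Handles the counterparty's `accept_channel` message, checking its parameters against our
/// configured handshake limits and recording the counterparty's keys and constraints. On
/// success the handshake is complete and [`Self::get_funding_created`] may be called once
/// the funding transaction is known.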
6343 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6344 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6346 // Check sanity of message fields:
6347 if !self.context.is_outbound() {
6348 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6350 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6351 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6353 if msg.dust_limit_satoshis > 21_000_000 * 100_000_000 {
6354 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6356 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6357 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6359 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6360 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6362 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6363 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6364 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6366 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6367 if msg.htlc_minimum_msat >= full_channel_value_msat {
6368 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6370 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6371 if msg.to_self_delay > max_delay_acceptable {
6372 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6374 if msg.max_accepted_htlcs < 1 {
6375 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6377 if msg.max_accepted_htlcs > MAX_HTLCS {
6378 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6381 // Now check against optional parameters as set by config...
6382 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6383 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6385 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6386 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6388 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6389 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6391 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6392 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6394 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6395 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6397 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6398 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6400 if msg.minimum_depth > peer_limits.max_minimum_depth {
6401 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6404 if let Some(ty) = &msg.channel_type {
6405 if *ty != self.context.channel_type {
6406 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6408 } else if their_features.supports_channel_type() {
6409 // Assume they've accepted the channel type as they said they understand it.
6411 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6412 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6413 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6415 self.context.channel_type = channel_type.clone();
6416 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6419 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6420 match &msg.shutdown_scriptpubkey {
6421 &Some(ref script) => {
6422 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6423 if script.len() == 0 {
6426 if !script::is_bolt2_compliant(&script, their_features) {
6427 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6429 Some(script.clone())
6432 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
6434 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6439 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6440 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6441 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6442 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6443 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6445 if peer_limits.trust_own_funding_0conf {
6446 self.context.minimum_depth = Some(msg.minimum_depth);
} else {
6448 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
}
6451 let counterparty_pubkeys = ChannelPublicKeys {
6452 funding_pubkey: msg.funding_pubkey,
6453 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6454 payment_point: msg.payment_point,
6455 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6456 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6459 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6460 selected_contest_delay: msg.to_self_delay,
6461 pubkeys: counterparty_pubkeys,
6464 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6465 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6467 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6468 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6474 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6475 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6476 pub context: ChannelContext<SP>,
6477 pub unfunded_context: UnfundedChannelContext,
6480 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6481 /// Creates a new channel from a remote side's request for one.
6482 /// Assumes chain_hash has already been checked and corresponds with what we expect!
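///
/// A minimal sketch, assuming `open_channel_msg` has already passed the chain-hash check:
///
/// ```ignore
/// let chan = InboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &our_supported_features, &their_features, &open_channel_msg, user_id, &config,
///     best_block_height, &logger, false /* is_0conf */,
/// )?;
/// ```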
6483 pub fn new<ES: Deref, F: Deref, L: Deref>(
6484 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6485 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6486 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6487 current_chain_height: u32, logger: &L, is_0conf: bool,
6488 ) -> Result<InboundV1Channel<SP>, ChannelError>
6489 where ES::Target: EntropySource,
6490 F::Target: FeeEstimator,
6493 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6494 let announced_channel = (msg.channel_flags & 1) == 1;
6496 // First check the channel type is known, failing before we do anything else if we don't
6497 // support this channel type.
6498 let channel_type = if let Some(channel_type) = &msg.channel_type {
6499 if channel_type.supports_any_optional_bits() {
6500 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6503 // We only support the channel types defined by the `ChannelManager` in
6504 // `provided_channel_type_features`. The channel type must always support
6505 // `static_remote_key`.
6506 if !channel_type.requires_static_remote_key() {
6507 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6509 // Make sure we support all of the features behind the channel type.
6510 if !channel_type.is_subset(our_supported_features) {
6511 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6513 if channel_type.requires_scid_privacy() && announced_channel {
6514 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6516 channel_type.clone()
6518 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6519 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6520 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6525 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6526 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6527 let pubkeys = holder_signer.pubkeys().clone();
6528 let counterparty_pubkeys = ChannelPublicKeys {
6529 funding_pubkey: msg.funding_pubkey,
6530 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6531 payment_point: msg.payment_point,
6532 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6533 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6536 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6537 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6540 // Check sanity of message fields:
6541 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6542 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6544 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6545 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6547 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6548 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6550 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6551 if msg.push_msat > full_channel_value_msat {
6552 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6554 if msg.dust_limit_satoshis > msg.funding_satoshis {
6555 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6557 if msg.htlc_minimum_msat >= full_channel_value_msat {
6558 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6560 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6562 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6563 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6564 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6566 if msg.max_accepted_htlcs < 1 {
6567 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6569 if msg.max_accepted_htlcs > MAX_HTLCS {
6570 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6573 // Now check against optional parameters as set by config...
6574 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6575 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6577 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6578 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6580 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6581 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6583 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6584 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6586 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6587 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6589 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6590 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6592 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6593 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6596 // Convert things into internal flags and prep our state:
6598 if config.channel_handshake_limits.force_announced_channel_preference {
6599 if config.channel_handshake_config.announced_channel != announced_channel {
6600 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6604 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6605 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6606 // This is a protocol-level safety check; it should never trigger because
6607 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` already enforces a larger minimum.
6608 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6610 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6611 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6613 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6614 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6615 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6617 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6618 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6621 // check if the funder's amount for the initial commitment tx is sufficient
6622 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6623 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6624 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
} else { 0 };
6628 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6629 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6630 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6631 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6634 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6635 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6636 // want to push much to us), our counterparty should always have more than our reserve.
6637 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6638 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6641 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6642 match &msg.shutdown_scriptpubkey {
6643 &Some(ref script) => {
6644 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6645 if script.len() == 0 {
6648 if !script::is_bolt2_compliant(&script, their_features) {
6649 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6651 Some(script.clone())
6654 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
6656 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6661 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6662 match signer_provider.get_shutdown_scriptpubkey() {
6663 Ok(scriptpubkey) => Some(scriptpubkey),
6664 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6668 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6669 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6670 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6674 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6675 Ok(script) => script,
6676 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6679 let mut secp_ctx = Secp256k1::new();
6680 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6682 let minimum_depth = if is_0conf {
Some(0)
} else {
6685 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
};
6689 context: ChannelContext {
6692 config: LegacyChannelConfig {
6693 options: config.channel_config.clone(),
6695 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6700 inbound_handshake_limits_override: None,
6702 temporary_channel_id: Some(msg.temporary_channel_id),
6703 channel_id: msg.temporary_channel_id,
6704 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6705 announcement_sigs_state: AnnouncementSigsState::NotSent,
6708 latest_monitor_update_id: 0,
6710 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6711 shutdown_scriptpubkey,
6714 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6715 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6716 value_to_self_msat: msg.push_msat,
6718 pending_inbound_htlcs: Vec::new(),
6719 pending_outbound_htlcs: Vec::new(),
6720 holding_cell_htlc_updates: Vec::new(),
6721 pending_update_fee: None,
6722 holding_cell_update_fee: None,
6723 next_holder_htlc_id: 0,
6724 next_counterparty_htlc_id: 0,
6725 update_time_counter: 1,
6727 resend_order: RAACommitmentOrder::CommitmentFirst,
6729 monitor_pending_channel_ready: false,
6730 monitor_pending_revoke_and_ack: false,
6731 monitor_pending_commitment_signed: false,
6732 monitor_pending_forwards: Vec::new(),
6733 monitor_pending_failures: Vec::new(),
6734 monitor_pending_finalized_fulfills: Vec::new(),
6736 signer_pending_commitment_update: false,
6737 signer_pending_funding: false,
6739 #[cfg(debug_assertions)]
6740 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6741 #[cfg(debug_assertions)]
6742 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6744 last_sent_closing_fee: None,
6745 pending_counterparty_closing_signed: None,
6746 expecting_peer_commitment_signed: false,
6747 closing_fee_limits: None,
6748 target_closing_feerate_sats_per_kw: None,
6750 funding_tx_confirmed_in: None,
6751 funding_tx_confirmation_height: 0,
6752 short_channel_id: None,
6753 channel_creation_height: current_chain_height,
6755 feerate_per_kw: msg.feerate_per_kw,
6756 channel_value_satoshis: msg.funding_satoshis,
6757 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6758 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6759 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6760 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6761 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6762 holder_selected_channel_reserve_satoshis,
6763 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6764 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6765 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6766 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6769 counterparty_forwarding_info: None,
6771 channel_transaction_parameters: ChannelTransactionParameters {
6772 holder_pubkeys: pubkeys,
6773 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6774 is_outbound_from_holder: false,
6775 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6776 selected_contest_delay: msg.to_self_delay,
6777 pubkeys: counterparty_pubkeys,
6779 funding_outpoint: None,
6780 channel_type_features: channel_type.clone()
6782 funding_transaction: None,
6783 is_batch_funding: None,
6785 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6786 counterparty_prev_commitment_point: None,
6787 counterparty_node_id,
6789 counterparty_shutdown_scriptpubkey,
6791 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6793 channel_update_status: ChannelUpdateStatus::Enabled,
6794 closing_signed_in_flight: false,
6796 announcement_sigs: None,
6798 #[cfg(any(test, fuzzing))]
6799 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6800 #[cfg(any(test, fuzzing))]
6801 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6803 workaround_lnd_bug_4006: None,
6804 sent_message_awaiting_response: None,
6806 latest_inbound_scid_alias: None,
6807 outbound_scid_alias: 0,
6809 channel_pending_event_emitted: false,
6810 channel_ready_event_emitted: false,
6812 #[cfg(any(test, fuzzing))]
6813 historical_inbound_htlc_fulfills: HashSet::new(),
6818 blocked_monitor_updates: Vec::new(),
6820 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6826 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6827 /// should be sent back to the counterparty node.
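///
/// A minimal sketch, assuming `chan` is a freshly created [`InboundV1Channel`]:
///
/// ```ignore
/// let accept_msg = chan.accept_inbound_channel();
/// // Send `accept_msg` back to the counterparty.
/// ```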
6829 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6830 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6831 if self.context.is_outbound() {
6832 panic!("Tried to send accept_channel for an outbound channel?");
6834 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6835 panic!("Tried to send accept_channel after channel had moved forward");
6837 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6838 panic!("Tried to send an accept_channel for a channel that has already advanced");
6841 self.generate_accept_channel_message()
6844 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6845 /// inbound channel. If the intention is to accept an inbound channel, use
6846 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6848 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6849 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6850 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6851 let keys = self.context.get_holder_pubkeys();
6853 msgs::AcceptChannel {
6854 temporary_channel_id: self.context.channel_id,
6855 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6856 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6857 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6858 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6859 minimum_depth: self.context.minimum_depth.unwrap(),
6860 to_self_delay: self.context.get_holder_selected_contest_delay(),
6861 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6862 funding_pubkey: keys.funding_pubkey,
6863 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6864 payment_point: keys.payment_point,
6865 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6866 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6867 first_per_commitment_point,
6868 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6869 Some(script) => script.clone().into_inner(),
6870 None => Builder::new().into_script(),
6872 channel_type: Some(self.context.channel_type.clone()),
6874 next_local_nonce: None,
6878 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6879 /// inbound channel without accepting it.
6881 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6883 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6884 self.generate_accept_channel_message()
6887 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6888 let funding_script = self.context.get_funding_redeemscript();
6890 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6891 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6892 let trusted_tx = initial_commitment_tx.trust();
6893 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6894 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6895 // They sign the holder commitment transaction...
6896 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6897 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6898 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6899 encode::serialize_hex(&funding_script), &self.context.channel_id());
6900 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6902 Ok(initial_commitment_tx)
6905 pub fn funding_created<L: Deref>(
6906 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6907 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
6911 if self.context.is_outbound() {
6912 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6914 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6915 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6916 // remember the channel, so it's safe to just send an error_message here and drop the
// channel.
6918 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6920 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6921 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6922 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6923 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6926 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6927 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6928 // This is an externally observable change before we finish all our checks. In particular
6929 // check_funding_created_signature may fail.
6930 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6932 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
Ok(res) => res,
6934 Err(ChannelError::Close(e)) => {
6935 self.context.channel_transaction_parameters.funding_outpoint = None;
6936 return Err((self, ChannelError::Close(e)));
},
Err(e) => {
6939 // The only error we know how to handle is ChannelError::Close, so we fall over here
6940 // to make sure we don't continue with an inconsistent state.
6941 panic!("unexpected error type from check_funding_created_signature {:?}", e);
6945 let holder_commitment_tx = HolderCommitmentTransaction::new(
6946 initial_commitment_tx,
msg.signature,
Vec::new(),
6949 &self.context.get_holder_pubkeys().funding_pubkey,
6950 self.context.counterparty_funding_pubkey()
6953 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6954 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6957 // Now that we're past error-generating stuff, update our local state:
6959 self.context.channel_state = ChannelState::FundingSent as u32;
6960 self.context.channel_id = funding_txo.to_channel_id();
6961 self.context.cur_counterparty_commitment_transaction_number -= 1;
6962 self.context.cur_holder_commitment_transaction_number -= 1;
6964 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6966 let funding_redeemscript = self.context.get_funding_redeemscript();
6967 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6968 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6969 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6970 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6971 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6972 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6973 shutdown_script, self.context.get_holder_selected_contest_delay(),
6974 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6975 &self.context.channel_transaction_parameters,
6976 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6978 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6979 let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
6980 channel_monitor.provide_initial_counterparty_commitment_tx(
6981 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6982 self.context.cur_counterparty_commitment_transaction_number + 1,
6983 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6984 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6985 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
6987 log_info!(logger, "{} funding_signed for peer for channel {}",
6988 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
6990 // Promote the channel to a full-fledged one now that we have updated the state and have a
6991 // `ChannelMonitor`.
6992 let mut channel = Channel {
6993 context: self.context,
6995 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6996 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6998 Ok((channel, funding_signed, channel_monitor))
7002 const SERIALIZATION_VERSION: u8 = 3;
7003 const MIN_SERIALIZATION_VERSION: u8 = 3;
7005 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7011 impl Writeable for ChannelUpdateStatus {
7012 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7013 // We only care about writing out the current state as it was announced, ie only either
7014 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7015 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7017 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7018 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7019 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7020 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
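// For illustration: a round trip intentionally collapses staged states; e.g.
// DisabledStaged writes 0 and reads back as Enabled, and the pending transition
// is simply re-evaluated after reload.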
7026 impl Readable for ChannelUpdateStatus {
7027 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7028 Ok(match <u8 as Readable>::read(reader)? {
7029 0 => ChannelUpdateStatus::Enabled,
7030 1 => ChannelUpdateStatus::Disabled,
7031 _ => return Err(DecodeError::InvalidValue),
7036 impl Writeable for AnnouncementSigsState {
7037 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7038 // We only care about writing out the current state as if we had just disconnected, at
7039 // which point we always set anything but PeerReceived to NotSent.
7041 AnnouncementSigsState::NotSent => 0u8.write(writer),
7042 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7043 AnnouncementSigsState::Committed => 0u8.write(writer),
7044 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7049 impl Readable for AnnouncementSigsState {
7050 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7051 Ok(match <u8 as Readable>::read(reader)? {
7052 0 => AnnouncementSigsState::NotSent,
7053 1 => AnnouncementSigsState::PeerReceived,
7054 _ => return Err(DecodeError::InvalidValue),
7059 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7060 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7061 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7064 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7066 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7067 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7068 // the low bytes now and the optional high bytes later.
7069 let user_id_low = self.context.user_id as u64;
7070 user_id_low.write(writer)?;
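// A quick sketch of the split (the high half is written as a TLV further down
// and recombined on read):
//   let low = user_id as u64;            // written here
//   let high = (user_id >> 64) as u64;   // written as `user_id_high_opt`
//   debug_assert_eq!(((high as u128) << 64) | (low as u128), user_id);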
7072 // Version 1 deserializers expected to read parts of the config object here. Version 2
7073 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7074 // `minimum_depth` we simply write dummy values here.
7075 writer.write_all(&[0; 8])?;
7077 self.context.channel_id.write(writer)?;
7078 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7079 self.context.channel_value_satoshis.write(writer)?;
7081 self.context.latest_monitor_update_id.write(writer)?;
7083 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7084 // deserialized from that format.
7085 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7086 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7087 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7089 self.context.destination_script.write(writer)?;
7091 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7092 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7093 self.context.value_to_self_msat.write(writer)?;
7095 let mut dropped_inbound_htlcs = 0;
7096 for htlc in self.context.pending_inbound_htlcs.iter() {
7097 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7098 dropped_inbound_htlcs += 1;
7101 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7102 for htlc in self.context.pending_inbound_htlcs.iter() {
7103 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7106 htlc.htlc_id.write(writer)?;
7107 htlc.amount_msat.write(writer)?;
7108 htlc.cltv_expiry.write(writer)?;
7109 htlc.payment_hash.write(writer)?;
7111 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7112 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7114 htlc_state.write(writer)?;
7116 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7118 htlc_state.write(writer)?;
7120 &InboundHTLCState::Committed => {
7123 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7125 removal_reason.write(writer)?;
7130 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7131 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7132 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7134 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7135 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7136 htlc.htlc_id.write(writer)?;
7137 htlc.amount_msat.write(writer)?;
7138 htlc.cltv_expiry.write(writer)?;
7139 htlc.payment_hash.write(writer)?;
7140 htlc.source.write(writer)?;
7142 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7144 onion_packet.write(writer)?;
7146 &OutboundHTLCState::Committed => {
7149 &OutboundHTLCState::RemoteRemoved(_) => {
7150 // Treat this as a Committed because we haven't received the CS - they'll
7151 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7154 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7156 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7157 preimages.push(preimage);
7159 let reason: Option<&HTLCFailReason> = outcome.into();
7160 reason.write(writer)?;
7162 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7164 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7165 preimages.push(preimage);
7167 let reason: Option<&HTLCFailReason> = outcome.into();
7168 reason.write(writer)?;
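// Note on the vectors filled below: `pending_outbound_skimmed_fees` is built
// lazily, staying empty until the first `Some` and then backfilled with `None`
// up to the current index, while blinding points are pushed one per HTLC
// unconditionally. The read side requires exactly one entry per HTLC whenever
// a vector is present.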
7171 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7172 if pending_outbound_skimmed_fees.is_empty() {
7173 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7175 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7176 } else if !pending_outbound_skimmed_fees.is_empty() {
7177 pending_outbound_skimmed_fees.push(None);
7179 pending_outbound_blinding_points.push(htlc.blinding_point);
7182 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7183 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7184 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7185 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7187 &HTLCUpdateAwaitingACK::AddHTLC {
7188 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7189 blinding_point, skimmed_fee_msat,
7192 amount_msat.write(writer)?;
7193 cltv_expiry.write(writer)?;
7194 payment_hash.write(writer)?;
7195 source.write(writer)?;
7196 onion_routing_packet.write(writer)?;
7198 if let Some(skimmed_fee) = skimmed_fee_msat {
7199 if holding_cell_skimmed_fees.is_empty() {
7200 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7202 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7203 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7205 holding_cell_blinding_points.push(blinding_point);
7207 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7209 payment_preimage.write(writer)?;
7210 htlc_id.write(writer)?;
7212 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7214 htlc_id.write(writer)?;
7215 err_packet.write(writer)?;
7220 match self.context.resend_order {
7221 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7222 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7225 self.context.monitor_pending_channel_ready.write(writer)?;
7226 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7227 self.context.monitor_pending_commitment_signed.write(writer)?;
7229 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7230 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7231 pending_forward.write(writer)?;
7232 htlc_id.write(writer)?;
7235 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7236 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7237 htlc_source.write(writer)?;
7238 payment_hash.write(writer)?;
7239 fail_reason.write(writer)?;
7242 if self.context.is_outbound() {
7243 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7244 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7245 Some(feerate).write(writer)?;
7247 // As for inbound HTLCs, if the update was only announced and never committed in a
7248 // commitment_signed, drop it.
7249 None::<u32>.write(writer)?;
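// Note the asymmetry with the read side: no state byte is written, so on load
// the `FeeUpdateState` is inferred from `is_outbound_from_holder` (outbound
// updates are ours; an inbound update that survived serialization must have
// been AwaitingRemoteRevokeToAnnounce).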
7251 self.context.holding_cell_update_fee.write(writer)?;
7253 self.context.next_holder_htlc_id.write(writer)?;
7254 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7255 self.context.update_time_counter.write(writer)?;
7256 self.context.feerate_per_kw.write(writer)?;
7258 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7259 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7260 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7261 // consider the stale state on reload.
7264 self.context.funding_tx_confirmed_in.write(writer)?;
7265 self.context.funding_tx_confirmation_height.write(writer)?;
7266 self.context.short_channel_id.write(writer)?;
7268 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7269 self.context.holder_dust_limit_satoshis.write(writer)?;
7270 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7272 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7273 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7275 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7276 self.context.holder_htlc_minimum_msat.write(writer)?;
7277 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7279 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7280 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7282 match &self.context.counterparty_forwarding_info {
7285 info.fee_base_msat.write(writer)?;
7286 info.fee_proportional_millionths.write(writer)?;
7287 info.cltv_expiry_delta.write(writer)?;
7289 None => 0u8.write(writer)?
7292 self.context.channel_transaction_parameters.write(writer)?;
7293 self.context.funding_transaction.write(writer)?;
7295 self.context.counterparty_cur_commitment_point.write(writer)?;
7296 self.context.counterparty_prev_commitment_point.write(writer)?;
7297 self.context.counterparty_node_id.write(writer)?;
7299 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7301 self.context.commitment_secrets.write(writer)?;
7303 self.context.channel_update_status.write(writer)?;
7305 #[cfg(any(test, fuzzing))]
7306 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7307 #[cfg(any(test, fuzzing))]
7308 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7309 htlc.write(writer)?;
7312 // If the channel type is something other than only-static-remote-key, then we need to have
7313 // older clients fail to deserialize this channel at all. If the type is
7314 // only-static-remote-key, we simply consider it "default" and don't write the channel type out.
7316 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7317 Some(&self.context.channel_type) } else { None };
7319 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7320 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to
7321 // a percentage of the channel value other than the 10% which older versions of LDK used
7322 // to set it to before the percentage was made configurable.
7323 let serialized_holder_selected_reserve =
7324 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7325 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7327 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7328 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7329 let serialized_holder_htlc_max_in_flight =
7330 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7331 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
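// Worked example (assuming the helper computes a simple percentage of the
// channel value in msat, as the in-flight tests below exercise): a
// 1_000_000-sat channel under the legacy 10% default yields
// 1_000_000 * 1000 * 10 / 100 = 100_000_000 msat, so only a non-default
// configuration produces a `Some` here.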
7333 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7334 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7336 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7337 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7338 // we write the high bytes as an option here.
7339 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7341 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7343 write_tlv_fields!(writer, {
7344 (0, self.context.announcement_sigs, option),
7345 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7346 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7347 // them twice, once with their original default values above, and once as an option
7348 // here. On the read side, old versions will simply ignore the odd-type entries here,
7349 // and new versions map the default values to None and allow the TLV entries here to override them.
7351 (1, self.context.minimum_depth, option),
7352 (2, chan_type, option),
7353 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7354 (4, serialized_holder_selected_reserve, option),
7355 (5, self.context.config, required),
7356 (6, serialized_holder_htlc_max_in_flight, option),
7357 (7, self.context.shutdown_scriptpubkey, option),
7358 (8, self.context.blocked_monitor_updates, optional_vec),
7359 (9, self.context.target_closing_feerate_sats_per_kw, option),
7360 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7361 (13, self.context.channel_creation_height, required),
7362 (15, preimages, required_vec),
7363 (17, self.context.announcement_sigs_state, required),
7364 (19, self.context.latest_inbound_scid_alias, option),
7365 (21, self.context.outbound_scid_alias, required),
7366 (23, channel_ready_event_emitted, option),
7367 (25, user_id_high_opt, option),
7368 (27, self.context.channel_keys_id, required),
7369 (28, holder_max_accepted_htlcs, option),
7370 (29, self.context.temporary_channel_id, option),
7371 (31, channel_pending_event_emitted, option),
7372 (35, pending_outbound_skimmed_fees, optional_vec),
7373 (37, holding_cell_skimmed_fees, optional_vec),
7374 (38, self.context.is_batch_funding, option),
7375 (39, pending_outbound_blinding_points, optional_vec),
7376 (41, holding_cell_blinding_points, optional_vec),
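// A note on the type numbers above (the usual TLV convention this file relies
// on): odd types may be skipped by readers that don't understand them, while
// even types (2, 4, 6, 28, 38, ...) force older versions to fail to
// deserialize rather than silently misinterpret the channel.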
7383 const MAX_ALLOC_SIZE: usize = 64*1024;
7384 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7386 ES::Target: EntropySource,
7387 SP::Target: SignerProvider
7389 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7390 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7391 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7393 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7394 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7395 // the low bytes now and the high bytes later.
7396 let user_id_low: u64 = Readable::read(reader)?;
7398 let mut config = Some(LegacyChannelConfig::default());
7400 // Read the old serialization of the ChannelConfig from version 0.0.98.
7401 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7402 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7403 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7404 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7406 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7407 let mut _val: u64 = Readable::read(reader)?;
7410 let channel_id = Readable::read(reader)?;
7411 let channel_state = Readable::read(reader)?;
7412 let channel_value_satoshis = Readable::read(reader)?;
7414 let latest_monitor_update_id = Readable::read(reader)?;
7416 let mut keys_data = None;
7418 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7419 // the `channel_keys_id` TLV is present below.
7420 let keys_len: u32 = Readable::read(reader)?;
7421 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7422 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7423 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7424 let mut data = [0; 1024];
7425 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7426 reader.read_exact(read_slice)?;
7427 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
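// Together with the MAX_ALLOC_SIZE cap on the initial capacity, this chunked
// loop means the buffer only grows as bytes actually arrive, so a corrupted
// `keys_len` cannot force a multi-gigabyte up-front reservation.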
7431 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7432 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7433 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7436 let destination_script = Readable::read(reader)?;
7438 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7439 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7440 let value_to_self_msat = Readable::read(reader)?;
7442 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7444 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7445 for _ in 0..pending_inbound_htlc_count {
7446 pending_inbound_htlcs.push(InboundHTLCOutput {
7447 htlc_id: Readable::read(reader)?,
7448 amount_msat: Readable::read(reader)?,
7449 cltv_expiry: Readable::read(reader)?,
7450 payment_hash: Readable::read(reader)?,
7451 state: match <u8 as Readable>::read(reader)? {
7452 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7453 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7454 3 => InboundHTLCState::Committed,
7455 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7456 _ => return Err(DecodeError::InvalidValue),
7461 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7462 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7463 for _ in 0..pending_outbound_htlc_count {
7464 pending_outbound_htlcs.push(OutboundHTLCOutput {
7465 htlc_id: Readable::read(reader)?,
7466 amount_msat: Readable::read(reader)?,
7467 cltv_expiry: Readable::read(reader)?,
7468 payment_hash: Readable::read(reader)?,
7469 source: Readable::read(reader)?,
7470 state: match <u8 as Readable>::read(reader)? {
7471 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7472 1 => OutboundHTLCState::Committed,
7474 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7475 OutboundHTLCState::RemoteRemoved(option.into())
7478 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7479 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7482 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7483 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7485 _ => return Err(DecodeError::InvalidValue),
7487 skimmed_fee_msat: None,
7488 blinding_point: None,
7492 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7493 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7494 for _ in 0..holding_cell_htlc_update_count {
7495 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7496 0 => HTLCUpdateAwaitingACK::AddHTLC {
7497 amount_msat: Readable::read(reader)?,
7498 cltv_expiry: Readable::read(reader)?,
7499 payment_hash: Readable::read(reader)?,
7500 source: Readable::read(reader)?,
7501 onion_routing_packet: Readable::read(reader)?,
7502 skimmed_fee_msat: None,
7503 blinding_point: None,
7505 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7506 payment_preimage: Readable::read(reader)?,
7507 htlc_id: Readable::read(reader)?,
7509 2 => HTLCUpdateAwaitingACK::FailHTLC {
7510 htlc_id: Readable::read(reader)?,
7511 err_packet: Readable::read(reader)?,
7513 _ => return Err(DecodeError::InvalidValue),
7517 let resend_order = match <u8 as Readable>::read(reader)? {
7518 0 => RAACommitmentOrder::CommitmentFirst,
7519 1 => RAACommitmentOrder::RevokeAndACKFirst,
7520 _ => return Err(DecodeError::InvalidValue),
7523 let monitor_pending_channel_ready = Readable::read(reader)?;
7524 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7525 let monitor_pending_commitment_signed = Readable::read(reader)?;
7527 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7528 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7529 for _ in 0..monitor_pending_forwards_count {
7530 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7533 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7534 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7535 for _ in 0..monitor_pending_failures_count {
7536 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7539 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7541 let holding_cell_update_fee = Readable::read(reader)?;
7543 let next_holder_htlc_id = Readable::read(reader)?;
7544 let next_counterparty_htlc_id = Readable::read(reader)?;
7545 let update_time_counter = Readable::read(reader)?;
7546 let feerate_per_kw = Readable::read(reader)?;
7548 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7549 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7550 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7551 // consider the stale state on reload.
7552 match <u8 as Readable>::read(reader)? {
7555 let _: u32 = Readable::read(reader)?;
7556 let _: u64 = Readable::read(reader)?;
7557 let _: Signature = Readable::read(reader)?;
7559 _ => return Err(DecodeError::InvalidValue),
7562 let funding_tx_confirmed_in = Readable::read(reader)?;
7563 let funding_tx_confirmation_height = Readable::read(reader)?;
7564 let short_channel_id = Readable::read(reader)?;
7566 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7567 let holder_dust_limit_satoshis = Readable::read(reader)?;
7568 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7569 let mut counterparty_selected_channel_reserve_satoshis = None;
7571 // Read the old serialization from version 0.0.98.
7572 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7574 // Read the 8 bytes of backwards-compatibility data.
7575 let _dummy: u64 = Readable::read(reader)?;
7577 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7578 let holder_htlc_minimum_msat = Readable::read(reader)?;
7579 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7581 let mut minimum_depth = None;
7583 // Read the old serialization from version 0.0.98.
7584 minimum_depth = Some(Readable::read(reader)?);
7586 // Read the 4 bytes of backwards-compatibility data.
7587 let _dummy: u32 = Readable::read(reader)?;
7590 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7592 1 => Some(CounterpartyForwardingInfo {
7593 fee_base_msat: Readable::read(reader)?,
7594 fee_proportional_millionths: Readable::read(reader)?,
7595 cltv_expiry_delta: Readable::read(reader)?,
7597 _ => return Err(DecodeError::InvalidValue),
7600 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7601 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7603 let counterparty_cur_commitment_point = Readable::read(reader)?;
7605 let counterparty_prev_commitment_point = Readable::read(reader)?;
7606 let counterparty_node_id = Readable::read(reader)?;
7608 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7609 let commitment_secrets = Readable::read(reader)?;
7611 let channel_update_status = Readable::read(reader)?;
7613 #[cfg(any(test, fuzzing))]
7614 let mut historical_inbound_htlc_fulfills = HashSet::new();
7615 #[cfg(any(test, fuzzing))]
7617 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7618 for _ in 0..htlc_fulfills_len {
7619 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7623 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7624 Some((feerate, if channel_parameters.is_outbound_from_holder {
7625 FeeUpdateState::Outbound
7627 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7633 let mut announcement_sigs = None;
7634 let mut target_closing_feerate_sats_per_kw = None;
7635 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7636 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7637 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7638 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7639 // only, so we default to that if none was written.
7640 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7641 let mut channel_creation_height = Some(serialized_height);
7642 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7644 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7645 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7646 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7647 let mut latest_inbound_scid_alias = None;
7648 let mut outbound_scid_alias = None;
7649 let mut channel_pending_event_emitted = None;
7650 let mut channel_ready_event_emitted = None;
7652 let mut user_id_high_opt: Option<u64> = None;
7653 let mut channel_keys_id: Option<[u8; 32]> = None;
7654 let mut temporary_channel_id: Option<ChannelId> = None;
7655 let mut holder_max_accepted_htlcs: Option<u16> = None;
7657 let mut blocked_monitor_updates = Some(Vec::new());
7659 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7660 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7662 let mut is_batch_funding: Option<()> = None;
7664 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7665 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7667 read_tlv_fields!(reader, {
7668 (0, announcement_sigs, option),
7669 (1, minimum_depth, option),
7670 (2, channel_type, option),
7671 (3, counterparty_selected_channel_reserve_satoshis, option),
7672 (4, holder_selected_channel_reserve_satoshis, option),
7673 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7674 (6, holder_max_htlc_value_in_flight_msat, option),
7675 (7, shutdown_scriptpubkey, option),
7676 (8, blocked_monitor_updates, optional_vec),
7677 (9, target_closing_feerate_sats_per_kw, option),
7678 (11, monitor_pending_finalized_fulfills, optional_vec),
7679 (13, channel_creation_height, option),
7680 (15, preimages_opt, optional_vec),
7681 (17, announcement_sigs_state, option),
7682 (19, latest_inbound_scid_alias, option),
7683 (21, outbound_scid_alias, option),
7684 (23, channel_ready_event_emitted, option),
7685 (25, user_id_high_opt, option),
7686 (27, channel_keys_id, option),
7687 (28, holder_max_accepted_htlcs, option),
7688 (29, temporary_channel_id, option),
7689 (31, channel_pending_event_emitted, option),
7690 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7691 (37, holding_cell_skimmed_fees_opt, optional_vec),
7692 (38, is_batch_funding, option),
7693 (39, pending_outbound_blinding_points_opt, optional_vec),
7694 (41, holding_cell_blinding_points_opt, optional_vec),
7697 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7698 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7699 // If we've gotten to the funding stage of the channel, populate the signer with its
7700 // required channel parameters.
7701 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7702 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7703 holder_signer.provide_channel_parameters(&channel_parameters);
7705 (channel_keys_id, holder_signer)
7707 // `keys_data` can be `None` if we had corrupted data.
7708 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7709 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7710 (holder_signer.channel_keys_id(), holder_signer)
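// The legacy (non-TLV) encoding above stored only an Option<HTLCFailReason>
// for removed-HTLC outcomes, so successes deserialize with a `None` preimage
// placeholder; the TLV-carried preimages are threaded back in here in the
// order they were written.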
7713 if let Some(preimages) = preimages_opt {
7714 let mut iter = preimages.into_iter();
7715 for htlc in pending_outbound_htlcs.iter_mut() {
7717 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7718 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7720 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7721 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7726 // We expect all preimages to be consumed above
7727 if iter.next().is_some() {
7728 return Err(DecodeError::InvalidValue);
7732 let chan_features = channel_type.as_ref().unwrap();
7733 if !chan_features.is_subset(our_supported_features) {
7734 // If the channel was written by a new version and negotiated with features we don't
7735 // understand yet, refuse to read it.
7736 return Err(DecodeError::UnknownRequiredFeature);
7739 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7740 // To account for that, we're proactively setting/overriding the field here.
7741 channel_parameters.channel_type_features = chan_features.clone();
7743 let mut secp_ctx = Secp256k1::new();
7744 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7746 // `user_id` used to be a single u64 value. In order to remain backwards
7747 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7748 // separate u64 values.
7749 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7751 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7753 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7754 let mut iter = skimmed_fees.into_iter();
7755 for htlc in pending_outbound_htlcs.iter_mut() {
7756 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7758 // We expect all skimmed fees to be consumed above
7759 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7761 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7762 let mut iter = skimmed_fees.into_iter();
7763 for htlc in holding_cell_htlc_updates.iter_mut() {
7764 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7765 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7768 // We expect all skimmed fees to be consumed above
7769 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7771 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
7772 let mut iter = blinding_pts.into_iter();
7773 for htlc in pending_outbound_htlcs.iter_mut() {
7774 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7776 // We expect all blinding points to be consumed above
7777 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7779 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
7780 let mut iter = blinding_pts.into_iter();
7781 for htlc in holding_cell_htlc_updates.iter_mut() {
7782 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
7783 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7786 // We expect all blinding points to be consumed above
7787 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7791 context: ChannelContext {
7794 config: config.unwrap(),
7798 // Note that we don't care about serializing handshake limits as we only ever serialize
7799 // channel data after the handshake has completed.
7800 inbound_handshake_limits_override: None,
7803 temporary_channel_id,
7805 announcement_sigs_state: announcement_sigs_state.unwrap(),
7807 channel_value_satoshis,
7809 latest_monitor_update_id,
7811 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7812 shutdown_scriptpubkey,
7815 cur_holder_commitment_transaction_number,
7816 cur_counterparty_commitment_transaction_number,
7819 holder_max_accepted_htlcs,
7820 pending_inbound_htlcs,
7821 pending_outbound_htlcs,
7822 holding_cell_htlc_updates,
7826 monitor_pending_channel_ready,
7827 monitor_pending_revoke_and_ack,
7828 monitor_pending_commitment_signed,
7829 monitor_pending_forwards,
7830 monitor_pending_failures,
7831 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7833 signer_pending_commitment_update: false,
7834 signer_pending_funding: false,
7837 holding_cell_update_fee,
7838 next_holder_htlc_id,
7839 next_counterparty_htlc_id,
7840 update_time_counter,
7843 #[cfg(debug_assertions)]
7844 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7845 #[cfg(debug_assertions)]
7846 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7848 last_sent_closing_fee: None,
7849 pending_counterparty_closing_signed: None,
7850 expecting_peer_commitment_signed: false,
7851 closing_fee_limits: None,
7852 target_closing_feerate_sats_per_kw,
7854 funding_tx_confirmed_in,
7855 funding_tx_confirmation_height,
7857 channel_creation_height: channel_creation_height.unwrap(),
7859 counterparty_dust_limit_satoshis,
7860 holder_dust_limit_satoshis,
7861 counterparty_max_htlc_value_in_flight_msat,
7862 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7863 counterparty_selected_channel_reserve_satoshis,
7864 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7865 counterparty_htlc_minimum_msat,
7866 holder_htlc_minimum_msat,
7867 counterparty_max_accepted_htlcs,
7870 counterparty_forwarding_info,
7872 channel_transaction_parameters: channel_parameters,
7873 funding_transaction,
7876 counterparty_cur_commitment_point,
7877 counterparty_prev_commitment_point,
7878 counterparty_node_id,
7880 counterparty_shutdown_scriptpubkey,
7884 channel_update_status,
7885 closing_signed_in_flight: false,
7889 #[cfg(any(test, fuzzing))]
7890 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7891 #[cfg(any(test, fuzzing))]
7892 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7894 workaround_lnd_bug_4006: None,
7895 sent_message_awaiting_response: None,
7897 latest_inbound_scid_alias,
7898 // Later in the ChannelManager deserialization phase we scan channels and assign an scid alias if it's missing
7899 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7901 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7902 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7904 #[cfg(any(test, fuzzing))]
7905 historical_inbound_htlc_fulfills,
7907 channel_type: channel_type.unwrap(),
7910 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7919 use bitcoin::blockdata::constants::ChainHash;
7920 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7921 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7922 use bitcoin::blockdata::opcodes;
7923 use bitcoin::network::constants::Network;
7924 use crate::ln::{PaymentHash, PaymentPreimage};
7925 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7926 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7927 use crate::ln::channel::InitFeatures;
7928 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
7929 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7930 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
7931 use crate::ln::msgs;
7932 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7933 use crate::ln::script::ShutdownScript;
7934 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7935 use crate::chain::BestBlock;
7936 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7937 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7938 use crate::chain::transaction::OutPoint;
7939 use crate::routing::router::{Path, RouteHop};
7940 use crate::util::config::UserConfig;
7941 use crate::util::errors::APIError;
7942 use crate::util::ser::{ReadableArgs, Writeable};
7943 use crate::util::test_utils;
7944 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7945 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7946 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7947 use bitcoin::secp256k1::{SecretKey,PublicKey};
7948 use bitcoin::hashes::sha256::Hash as Sha256;
7949 use bitcoin::hashes::Hash;
7950 use bitcoin::hashes::hex::FromHex;
7951 use bitcoin::hash_types::WPubkeyHash;
7952 use bitcoin::blockdata::locktime::absolute::LockTime;
7953 use bitcoin::address::{WitnessProgram, WitnessVersion};
7954 use crate::prelude::*;
7956 struct TestFeeEstimator {
7959 impl FeeEstimator for TestFeeEstimator {
7960 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7966 fn test_max_funding_satoshis_no_wumbo() {
7967 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7968 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7969 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7973 signer: InMemorySigner,
7976 impl EntropySource for Keys {
7977 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7980 impl SignerProvider for Keys {
7981 type EcdsaSigner = InMemorySigner;
7983 type TaprootSigner = InMemorySigner;
7985 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7986 self.signer.channel_keys_id()
7989 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
7993 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
7995 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
7996 let secp_ctx = Secp256k1::signing_only();
7997 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
7998 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
7999 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8002 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8003 let secp_ctx = Secp256k1::signing_only();
8004 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8005 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8009 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8010 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8011 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8015 fn upfront_shutdown_script_incompatibility() {
8016 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8017 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8018 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8021 let seed = [42; 32];
8022 let network = Network::Testnet;
8023 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8024 keys_provider.expect(OnGetShutdownScriptpubkey {
8025 returns: non_v0_segwit_shutdown_script.clone(),
8028 let secp_ctx = Secp256k1::new();
8029 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8030 let config = UserConfig::default();
8031 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8032 Err(APIError::IncompatibleShutdownScript { script }) => {
8033 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8035 Err(e) => panic!("Unexpected error: {:?}", e),
8036 Ok(_) => panic!("Expected error"),
8040 // Check that, during channel creation, we use the same feerate in the open channel message
8041 // as we do in the Channel object creation itself.
8043 fn test_open_channel_msg_fee() {
8044 let original_fee = 253;
8045 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8046 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8047 let secp_ctx = Secp256k1::new();
8048 let seed = [42; 32];
8049 let network = Network::Testnet;
8050 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8052 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8053 let config = UserConfig::default();
8054 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8056 // Now change the fee so we can check that the fee in the open_channel message is the
8057 // same as the old fee.
8058 fee_est.fee_est = 500;
8059 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8060 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8064 fn test_holder_vs_counterparty_dust_limit() {
8065 // Test that when calculating the local and remote commitment transaction fees, the correct
8066 // dust limits are used.
8067 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8068 let secp_ctx = Secp256k1::new();
8069 let seed = [42; 32];
8070 let network = Network::Testnet;
8071 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8072 let logger = test_utils::TestLogger::new();
8073 let best_block = BestBlock::from_network(network);
8075 // Go through the flow of opening a channel between two nodes, making sure
8076 // they have different dust limits.
8078 // Create Node A's channel pointing to Node B's pubkey
8079 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8080 let config = UserConfig::default();
8081 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8083 // Create Node B's channel by receiving Node A's open_channel message
8084 // Make sure A's dust limit is as we expect.
8085 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8086 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8087 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8089 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8090 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8091 accept_channel_msg.dust_limit_satoshis = 546;
8092 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8093 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8095 // Node A --> Node B: funding created
8096 let output_script = node_a_chan.context.get_funding_redeemscript();
8097 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8098 value: 10000000, script_pubkey: output_script.clone(),
8100 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8101 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8102 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8104 // Node B --> Node A: funding signed
8105 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8107 // Put some inbound and outbound HTLCs in A's channel.
8108 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8109 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8111 amount_msat: htlc_amount_msat,
8112 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8113 cltv_expiry: 300000000,
8114 state: InboundHTLCState::Committed,
8117 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8119 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8120 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8121 cltv_expiry: 200000000,
8122 state: OutboundHTLCState::Committed,
8123 source: HTLCSource::OutboundRoute {
8124 path: Path { hops: Vec::new(), blinded_tail: None },
8125 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8126 first_hop_htlc_msat: 548,
8127 payment_id: PaymentId([42; 32]),
8129 skimmed_fee_msat: None,
8130 blinding_point: None,
8133 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8134 // the dust limit check.
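// For reference (assuming the usual non-anchor weights of 724 for the base
// commitment tx plus 172 per non-dust HTLC): at 15_000 sat/kW a zero-HTLC
// commitment costs 15_000 * 724 / 1000 = 10_860 sats, which is the value
// `commit_tx_fee_msat(feerate, 0, ..)` returns (denominated in msat).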
8135 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8136 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8137 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8138 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8140 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8141 // of the HTLCs are seen to be above the dust limit.
8142 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8143 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8144 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8145 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8146 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8150 fn test_timeout_vs_success_htlc_dust_limit() {
8151 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8152 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8153 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8154 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
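// The effective dust threshold for an offered HTLC is the dust limit plus the
// fee of its HTLC-timeout transaction at the current feerate (HTLC-success for
// received HTLCs). E.g., assuming the usual non-anchor HTLC-timeout weight of
// 663: 253 sat/kW * 663 / 1000 = 167 sats on top of the dust limit; the
// candidates below sit one sat above or below that boundary, times 1000 for
// msat.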
8155 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8156 let secp_ctx = Secp256k1::new();
8157 let seed = [42; 32];
8158 let network = Network::Testnet;
8159 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8161 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8162 let config = UserConfig::default();
8163 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8165 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8166 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8168 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped, this HTLC would be
8169 // counted as dust when it shouldn't be.
8170 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8171 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8172 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8173 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8175 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8176 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8177 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8178 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8179 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8181 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8183 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8184 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8185 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8186 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8187 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8189 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8190 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8191 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8192 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8193 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8197 fn channel_reestablish_no_updates() {
8198 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8199 let logger = test_utils::TestLogger::new();
8200 let secp_ctx = Secp256k1::new();
8201 let seed = [42; 32];
8202 let network = Network::Testnet;
8203 let best_block = BestBlock::from_network(network);
8204 let chain_hash = ChainHash::using_genesis_block(network);
8205 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8207 // Go through the flow of opening a channel between two nodes.
8209 // Create Node A's channel pointing to Node B's pubkey
8210 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8211 let config = UserConfig::default();
8212 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8214 // Create Node B's channel by receiving Node A's open_channel message
8215 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8216 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8217 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8219 // Node B --> Node A: accept channel
8220 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8221 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8223 // Node A --> Node B: funding created
8224 let output_script = node_a_chan.context.get_funding_redeemscript();
8225 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8226 value: 10000000, script_pubkey: output_script.clone(),
8228 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8229 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8230 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8232 // Node B --> Node A: funding signed
8233 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8235 // Now disconnect the two nodes and check that the commitment point in
8236 // Node B's channel_reestablish message is sane.
8237 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8238 let msg = node_b_chan.get_channel_reestablish(&&logger);
8239 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8240 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8241 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8243 // Check that the commitment point in Node A's channel_reestablish message is sane.
8245 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8246 let msg = node_a_chan.get_channel_reestablish(&&logger);
8247 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8248 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8249 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
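// Right after the funding handshake neither side has revoked a commitment yet, so both
// peers expect commitment_signed number 1 next (next_local_commitment_number), expect
// revoke_and_ack number 0 next (next_remote_commitment_number), and have no counterparty
// per-commitment secret to echo back, hence the all-zero field.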
8253 fn test_configured_holder_max_htlc_value_in_flight() {
8254 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8255 let logger = test_utils::TestLogger::new();
8256 let secp_ctx = Secp256k1::new();
8257 let seed = [42; 32];
8258 let network = Network::Testnet;
8259 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8260 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8261 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8263 let mut config_2_percent = UserConfig::default();
8264 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8265 let mut config_99_percent = UserConfig::default();
8266 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8267 let mut config_0_percent = UserConfig::default();
8268 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8269 let mut config_101_percent = UserConfig::default();
8270 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8272 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8273 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8274 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8275 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8276 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8277 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
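// Concrete numbers for the check above: the channel is 10_000_000 sats
// (10_000_000_000 msat), so a 2% cap works out to 200_000_000 msat of inbound HTLC value
// allowed in flight.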
8279 // Test with the upper bound - 1 of valid values (99%).
8280 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8281 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8282 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8284 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8286 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8287 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8288 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8289 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8290 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8291 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8293 // Test with the upper bound - 1 of valid values (99%).
8294 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8295 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8296 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8298 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8299 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8300 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8301 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8302 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8304 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8305 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8307 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8308 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8309 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
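// Worked numbers for the two clamping cases above: a 0% configuration is clamped to the 1%
// floor (10_000_000_000 msat * 0.01 = 100_000_000 msat), and a 101% configuration is clamped
// to the 100% ceiling, i.e. the full 10_000_000_000 msat channel value.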
8311 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8312 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8313 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8314 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8315 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8317 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8318 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8320 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8321 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8322 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8326 fn test_configured_holder_selected_channel_reserve_satoshis() {
8328 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8329 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8330 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8332 // Test with valid but unreasonably high channel reserves
8333 // The requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves.
8334 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8335 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8337 // Test with calculated channel reserve less than lower bound
8338 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8339 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
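// As a rough worked example (assuming the 1_000-sat floor currently used for
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`): 100_000 sats * 0.00002 is only 2 sats, so the selected
// reserve is expected to be bumped up to the floor rather than the raw product.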
8341 // Test with invalid channel reserves since the sum of both is greater than or equal to the channel value.
8343 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8344 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8347 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8348 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8349 let logger = test_utils::TestLogger::new();
8350 let secp_ctx = Secp256k1::new();
8351 let seed = [42; 32];
8352 let network = Network::Testnet;
8353 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8354 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8355 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8358 let mut outbound_node_config = UserConfig::default();
8359 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8360 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8362 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8363 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
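// E.g. for the default 10_000_000-sat / 2% case exercised above, this works out to
// max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, 200_000) = 200_000 sats.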
8365 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8366 let mut inbound_node_config = UserConfig::default();
8367 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8369 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8370 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8372 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8374 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8375 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8377 // Channel negotiation is expected to fail since the combined reserves meet or exceed the channel value.
8378 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8379 assert!(result.is_err());
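// With 50% + 50% or 60% + 50% the requested reserves meet or exceed the full channel value,
// leaving the funder no spendable balance at all, so the inbound constructor is expected to
// reject the open_channel.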
8384 fn channel_update() {
8385 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8386 let logger = test_utils::TestLogger::new();
8387 let secp_ctx = Secp256k1::new();
8388 let seed = [42; 32];
8389 let network = Network::Testnet;
8390 let best_block = BestBlock::from_network(network);
8391 let chain_hash = ChainHash::using_genesis_block(network);
8392 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8394 // Create Node A's channel pointing to Node B's pubkey
8395 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8396 let config = UserConfig::default();
8397 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8399 // Create Node B's channel by receiving Node A's open_channel message
8400 // Make sure A's dust limit is as we expect.
8401 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8402 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8403 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8405 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8406 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8407 accept_channel_msg.dust_limit_satoshis = 546;
8408 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8409 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8411 // Node A --> Node B: funding created
8412 let output_script = node_a_chan.context.get_funding_redeemscript();
8413 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8414 value: 10000000, script_pubkey: output_script.clone(),
8416 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8417 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8418 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8420 // Node B --> Node A: funding signed
8421 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8423 // Make sure that receiving a channel update will update the Channel as expected.
8424 let update = ChannelUpdate {
8425 contents: UnsignedChannelUpdate {
8427 short_channel_id: 0,
8430 cltv_expiry_delta: 100,
8431 htlc_minimum_msat: 5,
8432 htlc_maximum_msat: MAX_VALUE_MSAT,
8434 fee_proportional_millionths: 11,
8435 excess_data: Vec::new(),
8437 signature: Signature::from(unsafe { FFISignature::new() })
8439 assert!(node_a_chan.channel_update(&update).unwrap());
8441 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8442 // change our official htlc_minimum_msat.
8443 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8444 match node_a_chan.context.counterparty_forwarding_info() {
8446 assert_eq!(info.cltv_expiry_delta, 100);
8447 assert_eq!(info.fee_base_msat, 110);
8448 assert_eq!(info.fee_proportional_millionths, 11);
8450 None => panic!("expected counterparty forwarding info to be Some")
8453 assert!(!node_a_chan.channel_update(&update).unwrap());
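// Quick sanity check of the forwarding parameters recorded above (base fee 110 msat,
// 11 parts-per-million): relaying a hypothetical 1_000_000 msat payment over this channel
// would cost roughly 110 + 1_000_000 * 11 / 1_000_000 = 121 msat in fees.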
8457 fn blinding_point_ser() {
8458 // Ensure that channel blinding points are (de)serialized properly.
8459 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8460 let secp_ctx = Secp256k1::new();
8461 let seed = [42; 32];
8462 let network = Network::Testnet;
8463 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8465 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8466 let config = UserConfig::default();
8467 let features = channelmanager::provided_init_features(&config);
8468 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8469 let mut chan = Channel { context: outbound_chan.context };
8471 let dummy_htlc_source = HTLCSource::OutboundRoute {
8473 hops: vec![RouteHop {
8474 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8475 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8476 cltv_expiry_delta: 0, maybe_announced_channel: false,
8480 session_priv: test_utils::privkey(42),
8481 first_hop_htlc_msat: 0,
8482 payment_id: PaymentId([42; 32]),
8484 let dummy_outbound_output = OutboundHTLCOutput {
8487 payment_hash: PaymentHash([43; 32]),
8489 state: OutboundHTLCState::Committed,
8490 source: dummy_htlc_source.clone(),
8491 skimmed_fee_msat: None,
8492 blinding_point: None,
8494 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8495 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8497 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8500 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8502 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8505 payment_hash: PaymentHash([43; 32]),
8506 source: dummy_htlc_source.clone(),
8507 onion_routing_packet: msgs::OnionPacket {
8509 public_key: Ok(test_utils::pubkey(1)),
8510 hop_data: [0; 20*65],
8513 skimmed_fee_msat: None,
8514 blinding_point: None,
8516 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8517 payment_preimage: PaymentPreimage([42; 32]),
8520 let mut holding_cell_htlc_updates = Vec::with_capacity(10);
8523 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8524 } else if i % 3 == 1 {
8525 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8527 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8528 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = &mut dummy_add {
8529 *blinding_point = Some(test_utils::pubkey(42 + i));
8531 holding_cell_htlc_updates.push(dummy_add);
8534 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8536 // Encode and decode the channel and ensure that the HTLCs within are the same.
8537 let encoded_chan = chan.encode();
8538 let mut s = crate::io::Cursor::new(&encoded_chan);
8539 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8540 let features = channelmanager::provided_channel_type_features(&config);
8541 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8542 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8543 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
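// The pending and holding-cell HTLCs above deliberately mix `Some` and `None` blinding
// points, so a byte-for-byte round-trip demonstrates that both cases survive
// (de)serialization unchanged.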
8546 #[cfg(feature = "_test_vectors")]
8548 fn outbound_commitment_test() {
8549 use bitcoin::sighash;
8550 use bitcoin::consensus::encode::serialize;
8551 use bitcoin::sighash::EcdsaSighashType;
8552 use bitcoin::hashes::hex::FromHex;
8553 use bitcoin::hash_types::Txid;
8554 use bitcoin::secp256k1::Message;
8555 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8556 use crate::ln::PaymentPreimage;
8557 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8558 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8559 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8560 use crate::util::logger::Logger;
8561 use crate::sync::Arc;
8562 use core::str::FromStr;
8563 use hex::DisplayHex;
8565 // Test vectors from BOLT 3 Appendices C and F (anchors):
8566 let feeest = TestFeeEstimator{fee_est: 15000};
8567 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8568 let secp_ctx = Secp256k1::new();
8570 let mut signer = InMemorySigner::new(
8572 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8573 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8574 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8575 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8576 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8578 // These aren't set in the test vectors:
8579 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8585 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8586 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8587 let keys_provider = Keys { signer: signer.clone() };
8589 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8590 let mut config = UserConfig::default();
8591 config.channel_handshake_config.announced_channel = false;
8592 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8593 chan.context.holder_dust_limit_satoshis = 546;
8594 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in during accept_channel
8596 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8598 let counterparty_pubkeys = ChannelPublicKeys {
8599 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8600 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8601 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8602 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
8603 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
8605 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8606 CounterpartyChannelTransactionParameters {
8607 pubkeys: counterparty_pubkeys.clone(),
8608 selected_contest_delay: 144
8610 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8611 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8613 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8614 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8616 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8617 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8619 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8620 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8622 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8623 // derived from a commitment_seed, so instead we copy it here and call
8624 // build_commitment_transaction.
8625 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8626 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8627 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8628 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8629 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
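// Note: the per_commitment_point is simply per_commitment_secret * G, and
// `TxCreationKeys::derive_new` tweaks the holder and counterparty basepoints with it to
// produce the per-commitment delayed-payment, HTLC and revocation keys used below.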
8631 macro_rules! test_commitment {
8632 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8633 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8634 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8638 macro_rules! test_commitment_with_anchors {
8639 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8640 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8641 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8645 macro_rules! test_commitment_common {
8646 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8647 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8649 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8650 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8652 let htlcs = commitment_stats.htlcs_included.drain(..)
8653 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8655 (commitment_stats.tx, htlcs)
8657 let trusted_tx = commitment_tx.trust();
8658 let unsigned_tx = trusted_tx.built_transaction();
8659 let redeemscript = chan.context.get_funding_redeemscript();
8660 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8661 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8662 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8663 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8665 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8666 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8667 let mut counterparty_htlc_sigs = Vec::new();
8668 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8670 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8671 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8672 counterparty_htlc_sigs.push(remote_signature);
8674 assert_eq!(htlcs.len(), per_htlc.len());
8676 let holder_commitment_tx = HolderCommitmentTransaction::new(
8677 commitment_tx.clone(),
8678 counterparty_signature,
8679 counterparty_htlc_sigs,
8680 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8681 chan.context.counterparty_funding_pubkey()
8683 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8684 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8686 let funding_redeemscript = chan.context.get_funding_redeemscript();
8687 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8688 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8690 // ((htlc, counterparty_sig), (index, holder_sig))
8691 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8694 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8695 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8697 let ref htlc = htlcs[$htlc_idx];
8698 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8699 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8700 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8701 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8702 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
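// Anchor channels sign second-stage HTLC transactions with SIGHASH_SINGLE|ANYONECANPAY so
// the broadcaster can later attach additional fee inputs/outputs; non-anchor channels use
// plain SIGHASH_ALL.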
8703 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8704 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
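// The BOLT 3 test HTLCs use preimages of the form [i; 32]; recover the matching preimage
// by hashing each candidate and comparing it against this HTLC's payment hash.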
8706 let mut preimage: Option<PaymentPreimage> = None;
8709 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
8710 if out == htlc.payment_hash {
8711 preimage = Some(PaymentPreimage([i; 32]));
8715 assert!(preimage.is_some());
8718 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8719 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8720 channel_derivation_parameters: ChannelDerivationParameters {
8721 value_satoshis: chan.context.channel_value_satoshis,
8722 keys_id: chan.context.channel_keys_id,
8723 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8725 commitment_txid: trusted_tx.txid(),
8726 per_commitment_number: trusted_tx.commitment_number(),
8727 per_commitment_point: trusted_tx.per_commitment_point(),
8728 feerate_per_kw: trusted_tx.feerate_per_kw(),
8730 preimage: preimage.clone(),
8731 counterparty_sig: *htlc_counterparty_sig,
8732 }, &secp_ctx).unwrap();
8733 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8734 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8736 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
8737 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8738 let trusted_tx = holder_commitment_tx.trust();
8739 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8740 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
8741 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
8743 assert!(htlc_counterparty_sig_iter.next().is_none());
8747 // anchors: simple commitment tx with no HTLCs and single anchor
8748 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8749 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8750 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8752 // simple commitment tx with no HTLCs
8753 chan.context.value_to_self_msat = 7000000000;
8755 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8756 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8757 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8759 // anchors: simple commitment tx with no HTLCs
8760 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8761 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8762 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8764 chan.context.pending_inbound_htlcs.push({
8765 let mut out = InboundHTLCOutput{
8767 amount_msat: 1000000,
8769 payment_hash: PaymentHash([0; 32]),
8770 state: InboundHTLCState::Committed,
8772 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
8775 chan.context.pending_inbound_htlcs.push({
8776 let mut out = InboundHTLCOutput{
8778 amount_msat: 2000000,
8780 payment_hash: PaymentHash([0; 32]),
8781 state: InboundHTLCState::Committed,
8783 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
8786 chan.context.pending_outbound_htlcs.push({
8787 let mut out = OutboundHTLCOutput{
8789 amount_msat: 2000000,
8791 payment_hash: PaymentHash([0; 32]),
8792 state: OutboundHTLCState::Committed,
8793 source: HTLCSource::dummy(),
8794 skimmed_fee_msat: None,
8795 blinding_point: None,
8797 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
8800 chan.context.pending_outbound_htlcs.push({
8801 let mut out = OutboundHTLCOutput{
8803 amount_msat: 3000000,
8805 payment_hash: PaymentHash([0; 32]),
8806 state: OutboundHTLCState::Committed,
8807 source: HTLCSource::dummy(),
8808 skimmed_fee_msat: None,
8809 blinding_point: None,
8811 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
8814 chan.context.pending_inbound_htlcs.push({
8815 let mut out = InboundHTLCOutput{
8817 amount_msat: 4000000,
8819 payment_hash: PaymentHash([0; 32]),
8820 state: InboundHTLCState::Committed,
8822 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
8826 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8827 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8828 chan.context.feerate_per_kw = 0;
8830 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8831 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8832 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8835 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8836 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8837 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8840 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8841 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8842 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8845 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8846 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8847 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8850 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8851 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8852 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8855 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8856 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8857 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8860 // commitment tx with seven outputs untrimmed (maximum feerate)
8861 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8862 chan.context.feerate_per_kw = 647;
8864 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8865 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8866 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8869 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8870 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8871 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8874 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8875 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8876 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8879 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8880 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8881 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8884 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8885 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8886 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8889 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8890 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8891 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8894 // commitment tx with six outputs untrimmed (minimum feerate)
8895 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8896 chan.context.feerate_per_kw = 648;
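// Rough arithmetic behind the 647/648 boundary (assuming the BOLT 3 HTLC-success weight of
// 703 WU): the smallest output is the 1_000_000 msat received HTLC, trimmed when
// 1_000 sats - feerate * 703 / 1000 falls below the 546-sat dust limit. At 647 sat/kW the
// second-stage fee is 454 sats (1_000 - 454 = 546, kept); at 648 sat/kW it is 455 sats
// (1_000 - 455 = 545, trimmed), dropping the commitment from seven outputs to six.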
8898 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8899 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8900 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8903 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8904 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8905 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8908 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8909 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8910 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8913 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8914 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8915 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8918 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8919 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8920 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8923 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8924 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8925 chan.context.feerate_per_kw = 645;
8926 chan.context.holder_dust_limit_satoshis = 1001;
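// The anchors variants below add two 330 sat anchor outputs to the commitment tx
// (visible as the two 0x014a-value outputs in the expected hex) and are checked via
// the test_commitment_with_anchors! macro rather than test_commitment!.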
8928 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8929 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8930 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8933 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8934 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8935 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8938 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8939 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8940 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8943 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8944 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8945 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8948 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8949 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8950 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8953 // commitment tx with six outputs untrimmed (maximum feerate)
8954 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8955 chan.context.feerate_per_kw = 2069;
8956 chan.context.holder_dust_limit_satoshis = 546;
8958 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8959 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8960 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8963 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8964 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8965 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8968 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8969 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8970 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8973 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8974 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8975 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8978 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8979 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8980 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8983 // commitment tx with five outputs untrimmed (minimum feerate)
8984 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8985 chan.context.feerate_per_kw = 2070;
8987 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8988 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8989 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8992 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
8993 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
8994 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8997 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
8998 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
8999 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9002 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9003 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9004 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9007 // commitment tx with five outputs untrimmed (maximum feerate)
9008 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9009 chan.context.feerate_per_kw = 2194;
9011 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9012 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9013 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9016 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9017 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9018 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9021 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9022 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9023 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9026 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9027 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9028 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9031 // commitment tx with four outputs untrimmed (minimum feerate)
9032 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9033 chan.context.feerate_per_kw = 2195;
9035 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9036 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9037 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9040 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9041 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9042 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9045 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9046 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9047 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9050 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9051 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9052 chan.context.feerate_per_kw = 2185;
9053 chan.context.holder_dust_limit_satoshis = 2001;
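// Cache the current (non-anchors) channel type so it can be restored for the
// non-anchors vectors that follow the anchors ones.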
9054 let cached_channel_type = chan.context.channel_type;
9055 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9057 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9058 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9059 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9062 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9063 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9064 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9067 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9068 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9069 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9072 // commitment tx with four outputs untrimmed (maximum feerate)
9073 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9074 chan.context.feerate_per_kw = 3702;
9075 chan.context.holder_dust_limit_satoshis = 546;
9076 chan.context.channel_type = cached_channel_type.clone();
9078 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9079 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9080 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9083 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9084 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9085 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9088 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9089 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9090 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9093 // commitment tx with three outputs untrimmed (minimum feerate)
9094 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9095 chan.context.feerate_per_kw = 3703;
9097 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9098 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9099 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9102 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9103 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9104 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9107 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9108 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9109 chan.context.feerate_per_kw = 3687;
9110 chan.context.holder_dust_limit_satoshis = 3001;
9111 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9113 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9114 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9115 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9118 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9119 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9120 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9123 // commitment tx with three outputs untrimmed (maximum feerate)
9124 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9125 chan.context.feerate_per_kw = 4914;
9126 chan.context.holder_dust_limit_satoshis = 546;
9127 chan.context.channel_type = cached_channel_type.clone();
9129 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9130 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9131 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9134 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9135 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9136 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9139 // commitment tx with two outputs untrimmed (minimum feerate)
9140 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9141 chan.context.feerate_per_kw = 4915;
9142 chan.context.holder_dust_limit_satoshis = 546;
9144 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9145 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9146 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9148 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9149 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9150 chan.context.feerate_per_kw = 4894;
9151 chan.context.holder_dust_limit_satoshis = 4001;
9152 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9154 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9155 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9156 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9158 // commitment tx with two outputs untrimmed (maximum feerate)
9159 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9160 chan.context.feerate_per_kw = 9651180;
9161 chan.context.holder_dust_limit_satoshis = 546;
9162 chan.context.channel_type = cached_channel_type.clone();
9164 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9165 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9166 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9168 // commitment tx with one output untrimmed (minimum feerate)
9169 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9170 chan.context.feerate_per_kw = 9651181;
9172 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9173 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9174 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9176 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9177 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9178 chan.context.feerate_per_kw = 6216010;
9179 chan.context.holder_dust_limit_satoshis = 4001;
9180 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9182 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9183 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9184 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9186 // commitment tx with fee greater than funder amount
9187 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9188 chan.context.feerate_per_kw = 9651936;
9189 chan.context.holder_dust_limit_satoshis = 546;
9190 chan.context.channel_type = cached_channel_type;
9192 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9193 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9194 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9196 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
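// The two offered HTLCs below (5000001 and 5000000 msat) both truncate to 5000 sat
// outputs with the same payment hash, so these vectors exercise BOLT 3's output
// ordering tie-break on CLTV expiry for otherwise-identical HTLC outputs.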
9197 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9198 chan.context.feerate_per_kw = 253;
9199 chan.context.pending_inbound_htlcs.clear();
9200 chan.context.pending_inbound_htlcs.push({
9201 let mut out = InboundHTLCOutput{
9203 amount_msat: 2000000,
9205 payment_hash: PaymentHash([0; 32]),
9206 state: InboundHTLCState::Committed,
9208 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9211 chan.context.pending_outbound_htlcs.clear();
9212 chan.context.pending_outbound_htlcs.push({
9213 let mut out = OutboundHTLCOutput{
9215 amount_msat: 5000001,
9217 payment_hash: PaymentHash([0; 32]),
9218 state: OutboundHTLCState::Committed,
9219 source: HTLCSource::dummy(),
9220 skimmed_fee_msat: None,
9221 blinding_point: None,
9223 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9226 chan.context.pending_outbound_htlcs.push({
9227 let mut out = OutboundHTLCOutput{
9229 amount_msat: 5000000,
9231 payment_hash: PaymentHash([0; 32]),
9232 state: OutboundHTLCState::Committed,
9233 source: HTLCSource::dummy(),
9234 skimmed_fee_msat: None,
9235 blinding_point: None,
9237 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9241 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9242 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9243 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9246 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9247 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9248 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9250 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9251 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9252 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9254 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9255 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9256 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }

		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
				 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
				 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

				  "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
				  "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
				  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

				  "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
				  "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
				  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

				  "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
				  "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
				  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}
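
	// An illustrative sketch (not one of the BOLT 3 vectors above): the per-commitment
	// secret derivation starts from the seed and, for each set bit of the 48-bit index
	// from the highest bit down, flips that bit of the running value and re-hashes it with
	// SHA256. The local `derive` helper below is a hypothetical restatement of what
	// `chan_utils::build_commitment_secret` is expected to compute (it assumes the
	// `to_byte_array` accessor from `bitcoin::hashes`), and is only checked against the
	// library function itself rather than against new vectors.
	#[test]
	fn test_per_commitment_secret_gen_sketch() {
		use bitcoin::hashes::{Hash, sha256};

		fn derive(seed: &[u8; 32], idx: u64) -> [u8; 32] {
			let mut res = *seed;
			for bitpos in (0..48usize).rev() {
				if idx & (1u64 << bitpos) != 0 {
					// Flip bit `bitpos` of the running value, then hash the result.
					res[bitpos / 8] ^= 1 << (bitpos & 7);
					res = sha256::Hash::hash(&res).to_byte_array();
				}
			}
			res
		}

		let seed = [0; 32];
		assert_eq!(derive(&seed, 281474976710655)[..],
			chan_utils::build_commitment_secret(&seed, 281474976710655)[..]);
	}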

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
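
	// An illustrative sketch of the BOLT 3 Appendix E rule the vectors above exercise:
	// pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G. This is a
	// hypothetical restatement for clarity (it assumes the hash-engine and `combine`
	// APIs of `bitcoin::hashes`/`secp256k1`), checked only against the public key of the
	// private key returned by `chan_utils::derive_private_key`.
	#[test]
	fn test_key_derivation_sketch() {
		use bitcoin::hashes::{Hash, HashEngine, sha256};
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);

		// tweak = SHA256(per_commitment_point || basepoint)
		let mut sha = sha256::Hash::engine();
		sha.input(&per_commitment_point.serialize());
		sha.input(&base_point.serialize());
		let tweak = sha256::Hash::from_engine(sha).to_byte_array();

		// pubkey = basepoint + tweak * G
		let tweak_point = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&tweak).unwrap());
		let derived_pubkey = base_point.combine(&tweak_point).unwrap();

		let derived_privkey = chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret);
		assert_eq!(derived_pubkey, PublicKey::from_secret_key(&secp_ctx, &derived_privkey));
	}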

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}
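
	// An illustrative sketch of the channel type negotiated above: it is simply
	// `static_remote_key` plus the `zero_conf` bit, both required. This assumes the
	// macro-generated `requires_*` accessors on `ChannelTypeFeatures`; it is a
	// hypothetical sanity check rather than an upstream test.
	#[test]
	fn test_zero_conf_channel_type_features_sketch() {
		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();
		assert!(channel_type_features.requires_static_remote_key());
		assert!(channel_type_features.requires_zero_conf());
	}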

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
		// need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}
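
	// An illustrative sketch tying the expected channel type above to the helper used by
	// the commitment-transaction vectors: `anchors_zero_fee_htlc_tx` channel types are
	// assumed to always carry `static_remote_key` as well, so the two constructions should
	// coincide. Hypothetical sanity check, not an upstream test.
	#[test]
	fn test_anchors_channel_type_composition_sketch() {
		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
		assert_eq!(ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), expected_channel_type);
	}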

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&channelmanager::provided_init_features(&config),

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			true, // Allow node b to send a 0conf channel_ready.

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			lock_time: LockTime::ZERO,
				value: 10000000, script_pubkey: output_script.clone(),
				value: 10000000, script_pubkey: Builder::new().into_script(),

		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(),
		let node_a_updates = node_a_chan.monitor_updating_restored(

		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),

		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// ChannelState::WaitingForBatch is cleared only when the ChannelManager calls set_batch_ready.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		assert!(node_a_chan.check_get_channel_ready(0).is_some());