1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
264 /// There are a few "states" and then a number of flags which can be applied:
265 /// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
266 /// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
267 /// move on to `ChannelReady`.
268 /// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
269 /// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
270 /// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
272 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
273 OurInitSent = 1 << 0,
274 /// Implies we have received their `open_channel`/`accept_channel` message
275 TheirInitSent = 1 << 1,
276 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
277 /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
278 /// upon receipt of `funding_created`, so simply skip this state.
280 /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
281 /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
282 /// and our counterparty consider the funding transaction confirmed.
284 /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
285 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
286 TheirChannelReady = 1 << 4,
287 /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
288 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
289 OurChannelReady = 1 << 5,
291 /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
292 /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
294 PeerDisconnected = 1 << 7,
295 /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
296 /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
297 /// sending any outbound messages until they've managed to finish.
298 MonitorUpdateInProgress = 1 << 8,
299 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
300 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
301 /// messages as then we will be unable to determine which HTLCs they included in their
302 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
304 /// Flag is set on `ChannelReady`.
305 AwaitingRemoteRevoke = 1 << 9,
306 /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
307 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
308 /// to respond with our own shutdown message when possible.
309 RemoteShutdownSent = 1 << 10,
310 /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
311 /// point, we may not add any new HTLCs to the channel.
312 LocalShutdownSent = 1 << 11,
313 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
314 /// to drop us, but we store this anyway.
ShutdownComplete = 1 << 12,
316 /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
317 /// broadcasting of the funding transaction is being held until all channels in the batch
318 /// have received funding_signed and have their monitors persisted.
319 WaitingForBatch = 1 << 13,
321 const BOTH_SIDES_SHUTDOWN_MASK: u32 =
322 ChannelState::LocalShutdownSent as u32 |
323 ChannelState::RemoteShutdownSent as u32;
324 const MULTI_STATE_FLAGS: u32 =
325 BOTH_SIDES_SHUTDOWN_MASK |
326 ChannelState::PeerDisconnected as u32 |
327 ChannelState::MonitorUpdateInProgress as u32;
328 const STATE_FLAGS: u32 =
330 ChannelState::TheirChannelReady as u32 |
331 ChannelState::OurChannelReady as u32 |
332 ChannelState::AwaitingRemoteRevoke as u32 |
333 ChannelState::WaitingForBatch as u32;
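// Editor-added illustrative sketch (not part of the original source): how the mask
// constants above are combined with the raw `channel_state` bits in the state checks
// later in this file. `channel_state` here is a hypothetical local variable.
//
//     // True once either side has sent a shutdown message:
//     let shutdown_in_progress = channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0;
//     // Strip the informational flags before comparing against the base states:
//     let base_state = channel_state & !STATE_FLAGS;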
335 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
337 pub const DEFAULT_MAX_HTLCS: u16 = 50;
339 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
340 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
341 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
342 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
346 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
348 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
350 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
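// Editor-added sketch showing how the weight constants above combine to estimate a
// commitment transaction's weight. This only demonstrates the arithmetic; the real fee
// computations in this file go through dedicated helpers. The module and test names are
// illustrative additions.
#[cfg(test)]
mod commitment_weight_example {
	use super::*;

	#[test]
	fn base_weight_plus_per_htlc_weight() {
		// A non-anchor channel type uses the 724 weight-unit base.
		let channel_type = ChannelTypeFeatures::only_static_remote_key();
		let num_nondust_htlcs: u64 = 3;
		let weight = commitment_tx_base_weight(&channel_type)
			+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
		// 724 + 3 * 172 = 1240 weight units.
		assert_eq!(weight, 1240);
	}
}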
/// The percentage of the channel value that `holder_max_htlc_value_in_flight_msat` used to be
/// set to before this was made configurable. The percentage was made configurable in LDK 0.0.107,
354 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
355 /// `holder_max_htlc_value_in_flight_msat`.
356 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
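// Editor-added worked example: with the legacy 10% value, a 1_000_000 sat channel
// (1_000_000_000 msat) would have `holder_max_htlc_value_in_flight_msat` set to
// 1_000_000_000 * 10 / 100 = 100_000_000 msat.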
358 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
359 /// `option_support_large_channel` (aka wumbo channels) is not supported.
361 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
363 /// Total bitcoin supply in satoshis.
364 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
366 /// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2PKH output before Bitcoin Core 22 considers the entire
368 /// transaction non-standard and thus refuses to relay it.
369 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
370 /// implementations use this value for their dust limit today.
371 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
373 /// The maximum channel dust limit we will accept from our counterparty.
374 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
376 /// The dust limit is used for both the commitment transaction outputs as well as the closing
377 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
378 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
379 /// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
382 /// See <https://github.com/lightning/bolts/issues/905> for more details.
383 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
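// Editor-added worked derivation of the 354 sat figure, assuming Bitcoin Core's default
// dust relay feerate of 3 sat/vbyte: the largest standard segwit output is 8 (value) +
// 1 (script length) + 42 (script) = 51 bytes, and Core assumes 67 additional vbytes to
// spend a segwit output, giving a dust threshold of (51 + 67) * 3 = 354 sats.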
385 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
386 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
388 /// Used to return a simple Error back to ChannelManager. Will get converted to a
389 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
390 /// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}
397 impl fmt::Debug for ChannelError {
398 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
400 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
401 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
402 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
407 impl fmt::Display for ChannelError {
408 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
410 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
411 &ChannelError::Warn(ref e) => write!(f, "{}", e),
412 &ChannelError::Close(ref e) => write!(f, "{}", e),
417 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
419 pub peer_id: Option<PublicKey>,
420 pub channel_id: Option<ChannelId>,
423 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
424 fn log(&self, mut record: Record) {
425 record.peer_id = self.peer_id;
426 record.channel_id = self.channel_id;
427 self.logger.log(record)
431 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
432 where L::Target: Logger {
433 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
434 where S::Target: SignerProvider
438 peer_id: Some(context.counterparty_node_id),
439 channel_id: Some(context.channel_id),
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
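// Editor-added hypothetical usage sketch for the macro above; the actual call sites in
// this file may differ. Any secp256k1 failure is converted into a `ChannelError::Close`.
//
//     secp_check!(self.secp_ctx.verify_ecdsa(&sighash, &their_sig, &their_pubkey),
//         "Invalid signature from peer".to_owned());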
453 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
454 /// our counterparty or not. However, we don't want to announce updates right away to avoid
455 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
456 /// our channel_update message and track the current state here.
457 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
458 #[derive(Clone, Copy, PartialEq)]
459 pub(super) enum ChannelUpdateStatus {
460 /// We've announced the channel as enabled and are connected to our peer.
462 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
464 /// Our channel is live again, but we haven't announced the channel as enabled yet.
466 /// We've announced the channel as disabled.
470 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
472 pub enum AnnouncementSigsState {
473 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
474 /// we sent the last `AnnouncementSignatures`.
476 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
477 /// This state never appears on disk - instead we write `NotSent`.
479 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
480 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
481 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
482 /// they send back a `RevokeAndACK`.
483 /// This state never appears on disk - instead we write `NotSent`.
485 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
486 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
490 /// An enum indicating whether the local or remote side offered a given HTLC.
/// A struct gathering stats on pending HTLCs, either on the inbound or outbound side.
499 pending_htlcs_value_msat: u64,
500 on_counterparty_tx_dust_exposure_msat: u64,
501 on_holder_tx_dust_exposure_msat: u64,
502 holding_cell_msat: u64,
503 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
/// A struct gathering stats on a commitment transaction, either local or remote.
507 struct CommitmentStats<'a> {
508 tx: CommitmentTransaction, // the transaction info
509 feerate_per_kw: u32, // the feerate included to build the transaction
510 total_fee_sat: u64, // the total fee included in the transaction
511 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
512 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
513 local_balance_msat: u64, // local balance before fees but considering dust limits
514 remote_balance_msat: u64, // remote balance before fees but considering dust limits
515 preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
518 /// Used when calculating whether we or the remote can afford an additional HTLC.
519 struct HTLCCandidate {
521 origin: HTLCInitiator,
525 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
533 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
535 enum UpdateFulfillFetch {
537 monitor_update: ChannelMonitorUpdate,
538 htlc_value_msat: u64,
539 msg: Option<msgs::UpdateFulfillHTLC>,
544 /// The return type of get_update_fulfill_htlc_and_commit.
545 pub enum UpdateFulfillCommitFetch {
546 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
547 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
548 /// previously placed in the holding cell (and has since been removed).
550 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
551 monitor_update: ChannelMonitorUpdate,
552 /// The value of the HTLC which was claimed, in msat.
553 htlc_value_msat: u64,
555 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
556 /// or has been forgotten (presumably previously claimed).
560 /// The return value of `monitor_updating_restored`
561 pub(super) struct MonitorRestoreUpdates {
562 pub raa: Option<msgs::RevokeAndACK>,
563 pub commitment_update: Option<msgs::CommitmentUpdate>,
564 pub order: RAACommitmentOrder,
565 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
566 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
567 pub finalized_claimed_htlcs: Vec<HTLCSource>,
568 pub funding_broadcastable: Option<Transaction>,
569 pub channel_ready: Option<msgs::ChannelReady>,
570 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
573 /// The return value of `signer_maybe_unblocked`
575 pub(super) struct SignerResumeUpdates {
576 pub commitment_update: Option<msgs::CommitmentUpdate>,
577 pub funding_signed: Option<msgs::FundingSigned>,
578 pub funding_created: Option<msgs::FundingCreated>,
579 pub channel_ready: Option<msgs::ChannelReady>,
582 /// The return value of `channel_reestablish`
583 pub(super) struct ReestablishResponses {
584 pub channel_ready: Option<msgs::ChannelReady>,
585 pub raa: Option<msgs::RevokeAndACK>,
586 pub commitment_update: Option<msgs::CommitmentUpdate>,
587 pub order: RAACommitmentOrder,
588 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
589 pub shutdown_msg: Option<msgs::Shutdown>,
592 /// The result of a shutdown that should be handled.
594 pub(crate) struct ShutdownResult {
595 /// A channel monitor update to apply.
596 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
597 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
598 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
599 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
600 /// propagated to the remainder of the batch.
601 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
602 pub(crate) channel_id: ChannelId,
603 pub(crate) counterparty_node_id: PublicKey,
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
607 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
608 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
609 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
610 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
611 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
612 /// by this multiple without hitting this case, before sending.
613 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
614 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
615 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
616 /// leave the channel less usable as we hold a bigger reserve.
617 #[cfg(any(fuzzing, test))]
618 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
619 #[cfg(not(any(fuzzing, test)))]
620 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
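// Editor-added sketch of how this multiple is typically applied before we, as the channel
// initiator, add a new outbound HTLC. The variable names are illustrative:
//
//     let buffered_feerate = feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
//     // ...then require that our remaining balance can still pay the commitment fee at
//     // `buffered_feerate` once the new HTLC is included.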
622 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
623 /// channel creation on an inbound channel, we simply force-close and move on.
624 /// This constant is the one suggested in BOLT 2.
625 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
627 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
628 /// not have enough balance value remaining to cover the onchain cost of this new
629 /// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel reserve.
632 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
633 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
634 /// leads to a channel force-close. Ultimately, this is an issue coming from the
635 /// design of LN state machines, allowing asynchronous updates.
636 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
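// Editor-added sketch of the buffer described above, with illustrative names: when
// computing the fee for an outbound update_fee, budget for additional concurrent HTLCs
// rather than only those currently pending.
//
//     let assumed_htlcs = num_pending_nondust_htlcs + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize;
//     // The commitment fee is then evaluated as if `assumed_htlcs` HTLC outputs were present.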
638 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
639 /// commitment transaction fees, with at least this many HTLCs present on the commitment
640 /// transaction (not counting the value of the HTLCs themselves).
641 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
643 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
644 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
645 /// ChannelUpdate prompted by the config update. This value was determined as follows:
647 /// * The expected interval between ticks (1 minute).
648 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
649 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
650 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
651 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
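// Editor-added arithmetic for the derivation above: ~300 seconds of average convergence
// delay / 60 seconds per tick = 5 ticks.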
653 /// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// and reconnect to them to unblock the state machine.
657 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
658 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
660 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
661 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
662 /// exceeding this age limit will be force-closed and purged from memory.
663 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
665 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
666 pub(crate) const COINBASE_MATURITY: u32 = 100;
668 struct PendingChannelMonitorUpdate {
669 update: ChannelMonitorUpdate,
672 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
673 (0, update, required),
676 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
677 /// its variants containing an appropriate channel struct.
678 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
679 UnfundedOutboundV1(OutboundV1Channel<SP>),
680 UnfundedInboundV1(InboundV1Channel<SP>),
684 impl<'a, SP: Deref> ChannelPhase<SP> where
685 SP::Target: SignerProvider,
686 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
688 pub fn context(&'a self) -> &'a ChannelContext<SP> {
690 ChannelPhase::Funded(chan) => &chan.context,
691 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
692 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
696 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
698 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
699 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
700 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
705 /// Contains all state common to unfunded inbound/outbound channels.
706 pub(super) struct UnfundedChannelContext {
707 /// A counter tracking how many ticks have elapsed since this unfunded channel was
/// created. If the peer has yet to respond and this unfunded channel reaches
/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` ticks, it will be force-closed and purged from memory.
711 /// This is so that we don't keep channels around that haven't progressed to a funded state
712 /// in a timely manner.
713 unfunded_channel_age_ticks: usize,
716 impl UnfundedChannelContext {
717 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
718 /// having reached the unfunded channel age limit.
720 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
721 pub fn should_expire_unfunded_channel(&mut self) -> bool {
722 self.unfunded_channel_age_ticks += 1;
723 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
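// Editor-added usage note: with one timer tick per minute, an unfunded channel is expired
// after roughly UNFUNDED_CHANNEL_AGE_LIMIT_TICKS * 1 minute = 60 minutes.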
/// Contains everything about the channel, including its state and various flags.
728 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
729 config: LegacyChannelConfig,
731 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
732 // constructed using it. The second element in the tuple corresponds to the number of ticks that
733 // have elapsed since the update occurred.
734 prev_config: Option<(ChannelConfig, usize)>,
736 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
740 /// The current channel ID.
741 channel_id: ChannelId,
742 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
743 /// Will be `None` for channels created prior to 0.0.115.
744 temporary_channel_id: Option<ChannelId>,
747 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
748 // our peer. However, we want to make sure they received it, or else rebroadcast it when we
750 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
751 // Note that a number of our tests were written prior to the behavior here which retransmits
752 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
754 #[cfg(any(test, feature = "_test_utils"))]
755 pub(crate) announcement_sigs_state: AnnouncementSigsState,
756 #[cfg(not(any(test, feature = "_test_utils")))]
757 announcement_sigs_state: AnnouncementSigsState,
759 secp_ctx: Secp256k1<secp256k1::All>,
760 channel_value_satoshis: u64,
762 latest_monitor_update_id: u64,
764 holder_signer: ChannelSignerType<SP>,
765 shutdown_scriptpubkey: Option<ShutdownScript>,
766 destination_script: ScriptBuf,
768 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
769 // generation start at 0 and count up...this simplifies some parts of implementation at the
770 // cost of others, but should really just be changed.
772 cur_holder_commitment_transaction_number: u64,
773 cur_counterparty_commitment_transaction_number: u64,
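// Editor-added example of the countdown described above: the first commitment
// transaction uses INITIAL_COMMITMENT_NUMBER (2^48 - 1) and each newly exchanged
// commitment decrements the counter, so after three updates it is
// INITIAL_COMMITMENT_NUMBER - 3.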
774 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
775 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
776 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
777 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
779 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
780 /// need to ensure we resend them in the order we originally generated them. Note that because
781 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
782 /// sufficient to simply set this to the opposite of any message we are generating as we
783 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
/// resend it first.
786 resend_order: RAACommitmentOrder,
788 monitor_pending_channel_ready: bool,
789 monitor_pending_revoke_and_ack: bool,
790 monitor_pending_commitment_signed: bool,
792 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
793 // responsible for some of the HTLCs here or not - we don't know whether the update in question
794 // completed or not. We currently ignore these fields entirely when force-closing a channel,
795 // but need to handle this somehow or we run the risk of losing HTLCs!
796 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
797 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
798 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
800 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
801 /// but our signer (initially) refused to give us a signature, we should retry at some point in
802 /// the future when the signer indicates it may have a signature for us.
804 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
805 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
806 signer_pending_commitment_update: bool,
807 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
808 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
809 /// outbound or inbound.
810 signer_pending_funding: bool,
812 // pending_update_fee is filled when sending and receiving update_fee.
814 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
815 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
816 // generating new commitment transactions with exactly the same criteria as inbound/outbound
817 // HTLCs with similar state.
818 pending_update_fee: Option<(u32, FeeUpdateState)>,
819 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
820 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
821 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
822 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
823 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
824 holding_cell_update_fee: Option<u32>,
825 next_holder_htlc_id: u64,
826 next_counterparty_htlc_id: u64,
829 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
830 /// when the channel is updated in ways which may impact the `channel_update` message or when a
/// new block is received, ensuring it's always at least moderately close to the current real
/// time.
833 update_time_counter: u32,
835 #[cfg(debug_assertions)]
836 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
837 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
838 #[cfg(debug_assertions)]
839 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
840 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
842 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
843 target_closing_feerate_sats_per_kw: Option<u32>,
845 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
846 /// update, we need to delay processing it until later. We do that here by simply storing the
847 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
848 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
850 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
851 /// transaction. These are set once we reach `closing_negotiation_ready`.
853 pub(crate) closing_fee_limits: Option<(u64, u64)>,
855 closing_fee_limits: Option<(u64, u64)>,
857 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
858 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
859 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
860 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
861 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
863 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
864 /// until we see a `commitment_signed` before doing so.
866 /// We don't bother to persist this - we anticipate this state won't last longer than a few
867 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
868 expecting_peer_commitment_signed: bool,
870 /// The hash of the block in which the funding transaction was included.
871 funding_tx_confirmed_in: Option<BlockHash>,
872 funding_tx_confirmation_height: u32,
873 short_channel_id: Option<u64>,
874 /// Either the height at which this channel was created or the height at which it was last
875 /// serialized if it was serialized by versions prior to 0.0.103.
876 /// We use this to close if funding is never broadcasted.
877 channel_creation_height: u32,
879 counterparty_dust_limit_satoshis: u64,
882 pub(super) holder_dust_limit_satoshis: u64,
884 holder_dust_limit_satoshis: u64,
887 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
889 counterparty_max_htlc_value_in_flight_msat: u64,
892 pub(super) holder_max_htlc_value_in_flight_msat: u64,
894 holder_max_htlc_value_in_flight_msat: u64,
896 /// minimum channel reserve for self to maintain - set by them.
897 counterparty_selected_channel_reserve_satoshis: Option<u64>,
900 pub(super) holder_selected_channel_reserve_satoshis: u64,
902 holder_selected_channel_reserve_satoshis: u64,
904 counterparty_htlc_minimum_msat: u64,
905 holder_htlc_minimum_msat: u64,
907 pub counterparty_max_accepted_htlcs: u16,
909 counterparty_max_accepted_htlcs: u16,
910 holder_max_accepted_htlcs: u16,
911 minimum_depth: Option<u32>,
913 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
915 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
916 funding_transaction: Option<Transaction>,
917 is_batch_funding: Option<()>,
919 counterparty_cur_commitment_point: Option<PublicKey>,
920 counterparty_prev_commitment_point: Option<PublicKey>,
921 counterparty_node_id: PublicKey,
923 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
925 commitment_secrets: CounterpartyCommitmentSecrets,
927 channel_update_status: ChannelUpdateStatus,
928 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
929 /// not complete within a single timer tick (one minute), we should force-close the channel.
930 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
932 /// Note that this field is reset to false on deserialization to give us a chance to connect to
933 /// our peer and start the closing_signed negotiation fresh.
934 closing_signed_in_flight: bool,
936 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
937 /// This can be used to rebroadcast the channel_announcement message later.
938 announcement_sigs: Option<(Signature, Signature)>,
940 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
941 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
// be, by comparing the cached values to the fee of the transaction generated by
943 // `build_commitment_transaction`.
944 #[cfg(any(test, fuzzing))]
945 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
946 #[cfg(any(test, fuzzing))]
947 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
949 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
950 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
951 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
952 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
953 /// message until we receive a channel_reestablish.
955 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
956 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
958 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
959 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
960 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
961 /// unblock the state machine.
963 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
964 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
965 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
967 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
968 /// [`msgs::RevokeAndACK`] message from the counterparty.
969 sent_message_awaiting_response: Option<usize>,
971 #[cfg(any(test, fuzzing))]
972 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
973 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
974 // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
975 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
976 // is fine, but as a sanity check in our failure to generate the second claim, we check here
977 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
978 historical_inbound_htlc_fulfills: HashSet<u64>,
980 /// This channel's type, as negotiated during channel open
981 channel_type: ChannelTypeFeatures,
983 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
984 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
985 // the channel's funding UTXO.
987 // We also use this when sending our peer a channel_update that isn't to be broadcasted
988 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
989 // associated channel mapping.
991 // We only bother storing the most recent SCID alias at any time, though our counterparty has
992 // to store all of them.
993 latest_inbound_scid_alias: Option<u64>,
995 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
996 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
997 // don't currently support node id aliases and eventually privacy should be provided with
998 // blinded paths instead of simple scid+node_id aliases.
999 outbound_scid_alias: u64,
1001 // We track whether we already emitted a `ChannelPending` event.
1002 channel_pending_event_emitted: bool,
1004 // We track whether we already emitted a `ChannelReady` event.
1005 channel_ready_event_emitted: bool,
1007 /// The unique identifier used to re-derive the private key material for the channel through
1008 /// [`SignerProvider::derive_channel_signer`].
1009 channel_keys_id: [u8; 32],
1011 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1012 /// store it here and only release it to the `ChannelManager` once it asks for it.
1013 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1016 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1017 /// Allowed in any state (including after shutdown)
1018 pub fn get_update_time_counter(&self) -> u32 {
1019 self.update_time_counter
1022 pub fn get_latest_monitor_update_id(&self) -> u64 {
1023 self.latest_monitor_update_id
1026 pub fn should_announce(&self) -> bool {
1027 self.config.announced_channel
1030 pub fn is_outbound(&self) -> bool {
1031 self.channel_transaction_parameters.is_outbound_from_holder
1034 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1035 /// Allowed in any state (including after shutdown)
1036 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1037 self.config.options.forwarding_fee_base_msat
1040 /// Returns true if we've ever received a message from the remote end for this Channel
1041 pub fn have_received_message(&self) -> bool {
1042 self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
1045 /// Returns true if this channel is fully established and not known to be closing.
1046 /// Allowed in any state (including after shutdown)
1047 pub fn is_usable(&self) -> bool {
1048 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
1049 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
/// Returns the state of the channel in its various stages of shutdown.
1053 pub fn shutdown_state(&self) -> ChannelShutdownState {
1054 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
1055 return ChannelShutdownState::ShutdownComplete;
1057 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
1058 return ChannelShutdownState::ShutdownInitiated;
1060 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
1061 return ChannelShutdownState::ResolvingHTLCs;
1063 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1064 return ChannelShutdownState::NegotiatingClosingFee;
1066 return ChannelShutdownState::NotShuttingDown;
1069 fn closing_negotiation_ready(&self) -> bool {
1070 self.pending_inbound_htlcs.is_empty() &&
1071 self.pending_outbound_htlcs.is_empty() &&
1072 self.pending_update_fee.is_none() &&
1073 self.channel_state &
1074 (BOTH_SIDES_SHUTDOWN_MASK |
1075 ChannelState::AwaitingRemoteRevoke as u32 |
1076 ChannelState::PeerDisconnected as u32 |
1077 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1080 /// Returns true if this channel is currently available for use. This is a superset of
1081 /// is_usable() and considers things like the channel being temporarily disabled.
1082 /// Allowed in any state (including after shutdown)
1083 pub fn is_live(&self) -> bool {
1084 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1087 // Public utilities:
1089 pub fn channel_id(&self) -> ChannelId {
1093 // Return the `temporary_channel_id` used during channel establishment.
1095 // Will return `None` for channels created prior to LDK version 0.0.115.
1096 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1097 self.temporary_channel_id
1100 pub fn minimum_depth(&self) -> Option<u32> {
1104 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1105 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1106 pub fn get_user_id(&self) -> u128 {
1110 /// Gets the channel's type
1111 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1115 /// Gets the channel's `short_channel_id`.
1117 /// Will return `None` if the channel hasn't been confirmed yet.
1118 pub fn get_short_channel_id(&self) -> Option<u64> {
1119 self.short_channel_id
1122 /// Allowed in any state (including after shutdown)
1123 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1124 self.latest_inbound_scid_alias
1127 /// Allowed in any state (including after shutdown)
1128 pub fn outbound_scid_alias(&self) -> u64 {
1129 self.outbound_scid_alias
1132 /// Returns the holder signer for this channel.
1134 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
&self.holder_signer
1138 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1139 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1140 /// or prior to any channel actions during `Channel` initialization.
1141 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1142 debug_assert_eq!(self.outbound_scid_alias, 0);
1143 self.outbound_scid_alias = outbound_scid_alias;
1146 /// Returns the funding_txo we either got from our peer, or were given by
1147 /// get_funding_created.
1148 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1149 self.channel_transaction_parameters.funding_outpoint
1152 /// Returns the height in which our funding transaction was confirmed.
1153 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1154 let conf_height = self.funding_tx_confirmation_height;
1155 if conf_height > 0 {
1162 /// Returns the block hash in which our funding transaction was confirmed.
1163 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1164 self.funding_tx_confirmed_in
1167 /// Returns the current number of confirmations on the funding transaction.
1168 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1169 if self.funding_tx_confirmation_height == 0 {
1170 // We either haven't seen any confirmation yet, or observed a reorg.
1174 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
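// Editor-added worked example: if the funding transaction confirmed at height 100 and the
// current height is 105, this returns 105 - 100 + 1 = 6 confirmations; a current height
// below the confirmation height (e.g. during a reorg) yields 0.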
1177 fn get_holder_selected_contest_delay(&self) -> u16 {
1178 self.channel_transaction_parameters.holder_selected_contest_delay
1181 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1182 &self.channel_transaction_parameters.holder_pubkeys
1185 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1186 self.channel_transaction_parameters.counterparty_parameters
1187 .as_ref().map(|params| params.selected_contest_delay)
1190 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1191 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1194 /// Allowed in any state (including after shutdown)
1195 pub fn get_counterparty_node_id(&self) -> PublicKey {
1196 self.counterparty_node_id
1199 /// Allowed in any state (including after shutdown)
1200 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1201 self.holder_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before `TheirInitSent`
1205 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1206 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1209 /// Allowed in any state (including after shutdown)
1210 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1212 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1213 // to use full capacity. This is an effort to reduce routing failures, because in many cases
// the channel might have been used to route very small values (either by honest users or as a DoS vector).
1215 self.channel_value_satoshis * 1000 * 9 / 10,
1217 self.counterparty_max_htlc_value_in_flight_msat
1221 /// Allowed in any state (including after shutdown)
1222 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1223 self.counterparty_htlc_minimum_msat
/// Allowed in any state (including after shutdown), but will return `None` before `TheirInitSent`
1227 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1228 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1231 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1232 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1233 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1235 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1236 party_max_htlc_value_in_flight_msat
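// Editor-added worked example: for a 1_000_000 sat channel where each side's reserve is
// 10_000 sats, the capacity-based bound is (1_000_000 - 10_000 - 10_000) * 1000 =
// 980_000_000 msat, which is then capped by the party's max HTLC value in flight.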
1241 pub fn get_value_satoshis(&self) -> u64 {
1242 self.channel_value_satoshis
1245 pub fn get_fee_proportional_millionths(&self) -> u32 {
1246 self.config.options.forwarding_fee_proportional_millionths
1249 pub fn get_cltv_expiry_delta(&self) -> u16 {
1250 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1253 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1254 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1255 where F::Target: FeeEstimator
1257 match self.config.options.max_dust_htlc_exposure {
1258 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1259 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1260 ConfirmationTarget::OnChainSweep) as u64;
1261 feerate_per_kw.saturating_mul(multiplier)
1263 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
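// For illustration: with `FeeRateMultiplier(10_000)` and an estimated 2_500 sat/kW feerate,
// the allowed dust exposure is 2_500 * 10_000 = 25_000_000 msat, while `FixedLimitMsat`
// simply uses the configured value.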
1267 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1268 pub fn prev_config(&self) -> Option<ChannelConfig> {
1269 self.prev_config.map(|prev_config| prev_config.0)
1272 // Checks whether we should emit a `ChannelPending` event.
1273 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1274 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1277 // Returns whether we already emitted a `ChannelPending` event.
1278 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1279 self.channel_pending_event_emitted
1282 // Remembers that we already emitted a `ChannelPending` event.
1283 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1284 self.channel_pending_event_emitted = true;
1287 // Checks whether we should emit a `ChannelReady` event.
1288 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1289 self.is_usable() && !self.channel_ready_event_emitted
1292 // Remembers that we already emitted a `ChannelReady` event.
1293 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1294 self.channel_ready_event_emitted = true;
1297 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1298 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1299 /// no longer be considered when forwarding HTLCs.
1300 pub fn maybe_expire_prev_config(&mut self) {
1301 if self.prev_config.is_none() {
1304 let prev_config = self.prev_config.as_mut().unwrap();
1306 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1307 self.prev_config = None;
1311 /// Returns the current [`ChannelConfig`] applied to the channel.
1312 pub fn config(&self) -> ChannelConfig {
1316 /// Updates the channel's config. A bool is returned indicating whether the config update
1317 /// applied resulted in a new ChannelUpdate message.
1318 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1319 let did_channel_update =
1320 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1321 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1322 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1323 if did_channel_update {
1324 self.prev_config = Some((self.config.options, 0));
1325 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1326 // policy change to propagate throughout the network.
1327 self.update_time_counter += 1;
1329 self.config.options = *config;
1333 /// Returns true if funding_signed was sent/received and the
1334 /// funding transaction has been broadcast if necessary.
1335 pub fn is_funding_broadcast(&self) -> bool {
1336 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1337 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1340 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1341 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1342 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1343 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1344 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1345 /// new HTLCs to a).
1346 /// @local is used only to convert relevant internal structures which refer to remote vs local
1347 /// to decide value of outputs and direction of HTLCs.
1348 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1349 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1350 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1351 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1352 /// which peer generated this transaction and "to whom" this transaction flows.
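/// For example, an HTLC the counterparty has proposed but which we have not yet committed to
/// (`RemoteAnnounced`) is only included when `generated_by_local` is false, i.e. in
/// transactions the counterparty generates, per the inclusion rules below.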
1354 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1355 where L::Target: Logger
1357 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1358 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1359 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1361 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1362 let mut remote_htlc_total_msat = 0;
1363 let mut local_htlc_total_msat = 0;
1364 let mut value_to_self_msat_offset = 0;
1366 let mut feerate_per_kw = self.feerate_per_kw;
1367 if let Some((feerate, update_state)) = self.pending_update_fee {
1368 if match update_state {
1369 // Note that these match the inclusion criteria when scanning
1370 // pending_inbound_htlcs below.
1371 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1372 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1373 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1375 feerate_per_kw = feerate;
1379 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1380 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1381 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1383 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1385 macro_rules! get_htlc_in_commitment {
1386 ($htlc: expr, $offered: expr) => {
1387 HTLCOutputInCommitment {
1389 amount_msat: $htlc.amount_msat,
1390 cltv_expiry: $htlc.cltv_expiry,
1391 payment_hash: $htlc.payment_hash,
1392 transaction_output_index: None
1397 macro_rules! add_htlc_output {
1398 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1399 if $outbound == local { // "offered HTLC output"
1400 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1401 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1404 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1406 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1407 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1408 included_non_dust_htlcs.push((htlc_in_tx, $source));
1410 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1411 included_dust_htlcs.push((htlc_in_tx, $source));
1414 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1415 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1418 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1420 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1421 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1422 included_non_dust_htlcs.push((htlc_in_tx, $source));
1424 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1425 included_dust_htlcs.push((htlc_in_tx, $source));
1431 for ref htlc in self.pending_inbound_htlcs.iter() {
1432 let (include, state_name) = match htlc.state {
1433 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1434 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1435 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1436 InboundHTLCState::Committed => (true, "Committed"),
1437 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1441 add_htlc_output!(htlc, false, None, state_name);
1442 remote_htlc_total_msat += htlc.amount_msat;
1444 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1446 &InboundHTLCState::LocalRemoved(ref reason) => {
1447 if generated_by_local {
1448 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
1449 value_to_self_msat_offset += htlc.amount_msat as i64;
1458 let mut preimages: Vec<PaymentPreimage> = Vec::new();
1460 for ref htlc in self.pending_outbound_htlcs.iter() {
1461 let (include, state_name) = match htlc.state {
1462 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1463 OutboundHTLCState::Committed => (true, "Committed"),
1464 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1465 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1466 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1469 let preimage_opt = match htlc.state {
1470 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1471 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1472 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1476 if let Some(preimage) = preimage_opt {
1477 preimages.push(preimage);
1481 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1482 local_htlc_total_msat += htlc.amount_msat;
1484 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1486 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1487 value_to_self_msat_offset -= htlc.amount_msat as i64;
1489 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1490 if !generated_by_local {
1491 value_to_self_msat_offset -= htlc.amount_msat as i64;
1499 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1500 assert!(value_to_self_msat >= 0);
1501 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1502 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1503 // "violate" their reserve value by couting those against it. Thus, we have to convert
1504 // everything to i64 before subtracting as otherwise we can overflow.
1505 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1506 assert!(value_to_remote_msat >= 0);
1508 #[cfg(debug_assertions)]
1510 // Make sure that the to_self/to_remote is always either past the appropriate
1511 // channel_reserve *or* it is making progress towards it.
1512 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1513 self.holder_max_commitment_tx_output.lock().unwrap()
1515 self.counterparty_max_commitment_tx_output.lock().unwrap()
1517 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1518 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1519 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1520 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1523 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1524 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1525 let (value_to_self, value_to_remote) = if self.is_outbound() {
1526 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1528 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1531 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1532 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1533 let (funding_pubkey_a, funding_pubkey_b) = if local {
1534 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1536 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1539 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1540 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1545 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1546 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1551 let num_nondust_htlcs = included_non_dust_htlcs.len();
1553 let channel_parameters =
1554 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1555 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1556 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1563 &mut included_non_dust_htlcs,
1566 let mut htlcs_included = included_non_dust_htlcs;
1567 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1568 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1569 htlcs_included.append(&mut included_dust_htlcs);
1571 // For the stats, trim the values (in msat) to 0 accordingly
1572 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1573 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1581 local_balance_msat: value_to_self_msat as u64,
1582 remote_balance_msat: value_to_remote_msat as u64,
1588 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1589 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1590 /// our counterparty!)
1591 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1592 /// TODO Some magic rust shit to compile-time check this?
1593 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1594 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1595 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1596 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1597 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1599 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1603 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1604 /// will sign and send to our counterparty.
1605 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1606 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1607 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1608 //may see payments to it!
1609 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1610 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1611 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1613 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1616 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1617 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1618 /// Panics if called before accept_channel/InboundV1Channel::new
1619 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1620 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1623 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1624 &self.get_counterparty_pubkeys().funding_pubkey
1627 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1631 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1632 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1633 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1634 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1635 // more dust balance if the feerate increases when we have several HTLCs pending
1636 // which are near the dust limit.
1637 let mut feerate_per_kw = self.feerate_per_kw;
1638 // If there's a pending update fee, use it to ensure we aren't under-estimating
1639 // potential feerate updates coming soon.
1640 if let Some((feerate, _)) = self.pending_update_fee {
1641 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1643 if let Some(feerate) = outbound_feerate_update {
1644 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1646 cmp::max(2530, feerate_per_kw * 1250 / 1000)
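// For illustration: at 1_000 sat/kWU this yields max(2530, 1_250) = 2_530 sat/kWU, while at
// 10_000 sat/kWU it yields max(2530, 12_500) = 12_500 sat/kWU (the 25% bump).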
1649 /// Get forwarding information for the counterparty.
1650 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1651 self.counterparty_forwarding_info.clone()
1654 /// Returns an HTLCStats about inbound pending htlcs
1655 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1657 let mut stats = HTLCStats {
1658 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1659 pending_htlcs_value_msat: 0,
1660 on_counterparty_tx_dust_exposure_msat: 0,
1661 on_holder_tx_dust_exposure_msat: 0,
1662 holding_cell_msat: 0,
1663 on_holder_tx_holding_cell_htlcs_count: 0,
1666 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1669 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1670 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1671 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1673 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1674 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
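// For illustration: on the counterparty's commitment an inbound HTLC would be resolved by
// them via an HTLC-timeout transaction, so their dust threshold is bumped by the timeout-tx
// fee above; on our commitment we would resolve it via an HTLC-success transaction, hence the
// success-tx bump on ours.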
1675 for ref htlc in context.pending_inbound_htlcs.iter() {
1676 stats.pending_htlcs_value_msat += htlc.amount_msat;
1677 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1678 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1680 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1681 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1687 /// Returns an HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1688 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1690 let mut stats = HTLCStats {
1691 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1692 pending_htlcs_value_msat: 0,
1693 on_counterparty_tx_dust_exposure_msat: 0,
1694 on_holder_tx_dust_exposure_msat: 0,
1695 holding_cell_msat: 0,
1696 on_holder_tx_holding_cell_htlcs_count: 0,
1699 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1702 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1703 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1704 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1706 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1707 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1708 for ref htlc in context.pending_outbound_htlcs.iter() {
1709 stats.pending_htlcs_value_msat += htlc.amount_msat;
1710 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1711 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1713 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1714 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1718 for update in context.holding_cell_htlc_updates.iter() {
1719 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1720 stats.pending_htlcs += 1;
1721 stats.pending_htlcs_value_msat += amount_msat;
1722 stats.holding_cell_msat += amount_msat;
1723 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1724 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1726 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1727 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1729 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1736 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1737 /// Doesn't bother handling the
1738 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1739 /// corner case properly.
1740 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1741 -> AvailableBalances
1742 where F::Target: FeeEstimator
1744 let context = &self;
1745 // Note that we have to handle overflow due to the above case.
1746 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1747 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1749 let mut balance_msat = context.value_to_self_msat;
1750 for ref htlc in context.pending_inbound_htlcs.iter() {
1751 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1752 balance_msat += htlc.amount_msat;
1755 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1757 let outbound_capacity_msat = context.value_to_self_msat
1758 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1760 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1762 let mut available_capacity_msat = outbound_capacity_msat;
1764 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1765 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1769 if context.is_outbound() {
1770 // We should mind channel commit tx fee when computing how much of the available capacity
1771 // can be used in the next htlc. Mirrors the logic in send_htlc.
1773 // The fee depends on whether the amount we will be sending is above dust or not,
1774 // and the answer will in turn change the amount itself, making it a circular dependency.
1776 // This complicates the computation around dust-values, up to the one-htlc-value.
1777 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1778 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1779 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1782 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1783 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1784 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1785 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1786 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1787 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1788 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1791 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1792 // value ends up being below dust, we have this fee available again. In that case,
1793 // match the value to right-below-dust.
1794 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1795 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1796 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1797 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1798 debug_assert!(one_htlc_difference_msat != 0);
1799 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1800 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1801 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1803 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1806 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1807 // sending a new HTLC won't reduce their balance below our reserve threshold.
1808 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1809 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1810 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1813 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1814 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1816 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1817 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1818 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1820 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1821 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1822 // we've selected for them, we can only send dust HTLCs.
1823 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1827 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1829 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1830 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1831 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1832 // send above the dust limit (as the router can always overpay to meet the dust limit).
1833 let mut remaining_msat_below_dust_exposure_limit = None;
1834 let mut dust_exposure_dust_limit_msat = 0;
1835 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1837 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1838 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1840 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1841 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1842 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1844 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1845 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1846 remaining_msat_below_dust_exposure_limit =
1847 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1848 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1851 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1852 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1853 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1854 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1855 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1856 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1859 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1860 if available_capacity_msat < dust_exposure_dust_limit_msat {
1861 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1863 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1867 available_capacity_msat = cmp::min(available_capacity_msat,
1868 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1870 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1871 available_capacity_msat = 0;
1875 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1876 - context.value_to_self_msat as i64
1877 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1878 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1880 outbound_capacity_msat,
1881 next_outbound_htlc_limit_msat: available_capacity_msat,
1882 next_outbound_htlc_minimum_msat,
1887 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1888 let context = &self;
1889 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1892 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1893 /// number of pending HTLCs that are on track to be in our next commitment tx.
1895 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1896 /// `fee_spike_buffer_htlc` is `Some`.
1898 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1899 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1901 /// Dust HTLCs are excluded.
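/// For example, with two non-dust HTLCs already committed, a non-dust `htlc` candidate and
/// `fee_spike_buffer_htlc` set, the fee below is computed as if the commitment carried four
/// HTLC outputs.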
1902 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1903 let context = &self;
1904 assert!(context.is_outbound());
1906 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1909 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1910 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1912 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1913 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1915 let mut addl_htlcs = 0;
1916 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1918 HTLCInitiator::LocalOffered => {
1919 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1923 HTLCInitiator::RemoteOffered => {
1924 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1930 let mut included_htlcs = 0;
1931 for ref htlc in context.pending_inbound_htlcs.iter() {
1932 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1935 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1936 // transaction including this HTLC if it times out before they RAA.
1937 included_htlcs += 1;
1940 for ref htlc in context.pending_outbound_htlcs.iter() {
1941 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1945 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1946 OutboundHTLCState::Committed => included_htlcs += 1,
1947 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1948 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1949 // transaction won't be generated until they send us their next RAA, which will mean
1950 // dropping any HTLCs in this state.
1955 for htlc in context.holding_cell_htlc_updates.iter() {
1957 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1958 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1963 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1964 // ack we're guaranteed to never include them in commitment txs anymore.
1968 let num_htlcs = included_htlcs + addl_htlcs;
1969 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
1970 #[cfg(any(test, fuzzing))]
1973 if fee_spike_buffer_htlc.is_some() {
1974 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1976 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1977 + context.holding_cell_htlc_updates.len();
1978 let commitment_tx_info = CommitmentTxInfoCached {
1980 total_pending_htlcs,
1981 next_holder_htlc_id: match htlc.origin {
1982 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1983 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1985 next_counterparty_htlc_id: match htlc.origin {
1986 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1987 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1989 feerate: context.feerate_per_kw,
1991 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
1996 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
1997 /// pending HTLCs that are on track to be in their next commitment tx
1999 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2000 /// `fee_spike_buffer_htlc` is `Some`.
2002 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2003 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2005 /// Dust HTLCs are excluded.
2006 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2007 let context = &self;
2008 assert!(!context.is_outbound());
2010 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2013 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2014 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2016 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2017 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2019 let mut addl_htlcs = 0;
2020 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2022 HTLCInitiator::LocalOffered => {
2023 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2027 HTLCInitiator::RemoteOffered => {
2028 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2034 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2035 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2036 // committed outbound HTLCs, see below.
2037 let mut included_htlcs = 0;
2038 for ref htlc in context.pending_inbound_htlcs.iter() {
2039 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2042 included_htlcs += 1;
2045 for ref htlc in context.pending_outbound_htlcs.iter() {
2046 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2049 // We only include outbound HTLCs if it'll be included in their next commitment_signed,
2050 // i.e. if they've responded to us with an RAA after announcement.
2052 OutboundHTLCState::Committed => included_htlcs += 1,
2053 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2054 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2059 let num_htlcs = included_htlcs + addl_htlcs;
2060 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2061 #[cfg(any(test, fuzzing))]
2064 if fee_spike_buffer_htlc.is_some() {
2065 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2067 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2068 let commitment_tx_info = CommitmentTxInfoCached {
2070 total_pending_htlcs,
2071 next_holder_htlc_id: match htlc.origin {
2072 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2073 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2075 next_counterparty_htlc_id: match htlc.origin {
2076 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2077 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2079 feerate: context.feerate_per_kw,
2081 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2086 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2087 where F: Fn() -> Option<O> {
2088 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2089 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2096 /// Returns the transaction if there is a pending funding transaction that is yet to be
2098 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2099 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2102 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2104 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2105 self.if_unbroadcasted_funding(||
2106 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2110 /// Returns whether the channel is funded in a batch.
2111 pub fn is_batch_funding(&self) -> bool {
2112 self.is_batch_funding.is_some()
2115 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2117 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2118 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2121 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2122 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2123 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2124 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2125 /// immediately (others we will have to allow to time out).
2126 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2127 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2128 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2129 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2130 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2131 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2133 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2134 // return them to fail the payment.
2135 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2136 let counterparty_node_id = self.get_counterparty_node_id();
2137 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2139 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2140 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2145 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2146 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2147 // returning a channel monitor update here would imply a channel monitor update before
2148 // we even registered the channel monitor to begin with, which is invalid.
2149 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2150 // funding transaction, don't return a funding txo (which prevents providing the
2151 // monitor update to the user, even if we return one).
2152 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2153 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2154 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2155 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2156 update_id: self.latest_monitor_update_id,
2157 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2161 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2163 self.channel_state = ChannelState::ShutdownComplete as u32;
2164 self.update_time_counter += 1;
2167 dropped_outbound_htlcs,
2168 unbroadcasted_batch_funding_txid,
2169 channel_id: self.channel_id,
2170 counterparty_node_id: self.counterparty_node_id,
2174 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2175 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2176 let counterparty_keys = self.build_remote_transaction_keys();
2177 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2178 let signature = match &self.holder_signer {
2179 // TODO (taproot|arik): move match into calling method for Taproot
2180 ChannelSignerType::Ecdsa(ecdsa) => {
2181 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2182 .map(|(sig, _)| sig).ok()?
2184 // TODO (taproot|arik)
2189 if self.signer_pending_funding {
2190 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2191 self.signer_pending_funding = false;
2194 Some(msgs::FundingCreated {
2195 temporary_channel_id: self.temporary_channel_id.unwrap(),
2196 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2197 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2200 partial_signature_with_nonce: None,
2202 next_local_nonce: None,
2206 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2207 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2208 let counterparty_keys = self.build_remote_transaction_keys();
2209 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2211 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2212 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2213 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2214 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2216 match &self.holder_signer {
2217 // TODO (arik): move match into calling method for Taproot
2218 ChannelSignerType::Ecdsa(ecdsa) => {
2219 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), &self.secp_ctx)
2220 .map(|(signature, _)| msgs::FundingSigned {
2221 channel_id: self.channel_id(),
2224 partial_signature_with_nonce: None,
2228 if funding_signed.is_none() {
2229 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2230 self.signer_pending_funding = true;
2231 } else if self.signer_pending_funding {
2232 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2233 self.signer_pending_funding = false;
2236 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2237 (counterparty_initial_commitment_tx, funding_signed)
2239 // TODO (taproot|arik)
2246 // Internal utility functions for channels
2248 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2249 /// `channel_value_satoshis` in msat, set through
2250 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2252 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
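/// For example, a 1_000_000 sat channel configured at 10% yields
/// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 10% of the channel value expressed in msat.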
2254 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2255 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2256 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2258 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2261 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2263 channel_value_satoshis * 10 * configured_percent
2266 /// Returns a minimum channel reserve value the remote needs to maintain,
2267 /// required by us according to the configured or default
2268 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2270 /// Guaranteed to return a value no larger than channel_value_satoshis
2272 /// This is used both for outbound and inbound channels and has lower bound
2273 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
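/// For example, with `their_channel_reserve_proportional_millionths` of 10_000 (1%), a
/// 1_000_000 sat channel yields a 10_000 sat reserve, floored at
/// `MIN_THEIR_CHAN_RESERVE_SATOSHIS` and capped at the channel value.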
2274 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2275 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2276 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2279 /// This is for legacy reasons, present for forward-compatibility.
2280 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2281 /// from storage. Hence, we use this function to not persist default values of
2282 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
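/// For example, a 50_000 sat channel gets max(50_000 / 100, 1000) = 1_000 sat here, while a
/// 1_000_000 sat channel gets 10_000 sat (1% of its value).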
2283 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2284 let (q, _) = channel_value_satoshis.overflowing_div(100);
2285 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2288 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2289 // Note that num_htlcs should not include dust HTLCs.
2291 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2292 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2295 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2296 // Note that num_htlcs should not include dust HTLCs.
2297 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2298 // Note that we need to divide before multiplying to round properly,
2299 // since the lowest denomination of bitcoin on-chain is the satoshi.
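// For illustration: at 1_001 sat/kW and a hypothetical total weight of 1_068 WU this yields
// 1_068 * 1_001 / 1000 = 1_069 sat (floored), i.e. 1_069_000 msat rather than the un-rounded
// 1_069_068 msat.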
2300 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2303 // Holder designates channel data owned for the benefit of the user client.
2304 // Counterparty designates channel data owned by the other channel participant entity.
2305 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2306 pub context: ChannelContext<SP>,
2309 #[cfg(any(test, fuzzing))]
2310 struct CommitmentTxInfoCached {
2312 total_pending_htlcs: usize,
2313 next_holder_htlc_id: u64,
2314 next_counterparty_htlc_id: u64,
2318 impl<SP: Deref> Channel<SP> where
2319 SP::Target: SignerProvider,
2320 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2322 fn check_remote_fee<F: Deref, L: Deref>(
2323 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2324 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2325 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2327 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2328 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2330 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2332 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2333 if feerate_per_kw < lower_limit {
2334 if let Some(cur_feerate) = cur_feerate_per_kw {
2335 if feerate_per_kw > cur_feerate {
2337 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2338 cur_feerate, feerate_per_kw);
2342 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2348 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2349 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2350 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2351 // outside of those situations will fail.
2352 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2356 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2361 1 + // script length (0)
2365 )*4 + // * 4 for non-witness parts
2366 2 + // witness marker and flag
2367 1 + // witness element count
2368 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2369 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2370 2*(1 + 71); // two signatures + sighash type flags
2371 if let Some(spk) = a_scriptpubkey {
2372 ret += ((8+1) + // output values and script length
2373 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2375 if let Some(spk) = b_scriptpubkey {
2376 ret += ((8+1) + // output values and script length
2377 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2383 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2384 assert!(self.context.pending_inbound_htlcs.is_empty());
2385 assert!(self.context.pending_outbound_htlcs.is_empty());
2386 assert!(self.context.pending_update_fee.is_none());
2388 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2389 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2390 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2392 if value_to_holder < 0 {
2393 assert!(self.context.is_outbound());
2394 total_fee_satoshis += (-value_to_holder) as u64;
2395 } else if value_to_counterparty < 0 {
2396 assert!(!self.context.is_outbound());
2397 total_fee_satoshis += (-value_to_counterparty) as u64;
2400 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2401 value_to_counterparty = 0;
2404 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2405 value_to_holder = 0;
2408 assert!(self.context.shutdown_scriptpubkey.is_some());
2409 let holder_shutdown_script = self.get_closing_scriptpubkey();
2410 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2411 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2413 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2414 (closing_transaction, total_fee_satoshis)
2417 fn funding_outpoint(&self) -> OutPoint {
2418 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2421 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2424 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2425 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2427 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2429 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2430 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2431 where L::Target: Logger {
2432 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2433 // (see equivalent if condition there).
2434 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2435 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2436 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2437 self.context.latest_monitor_update_id = mon_update_id;
2438 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2439 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2443 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2444 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2445 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2446 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2448 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2449 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2451 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2453 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2454 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2455 // these, but for now we just have to treat them as normal.
2457 let mut pending_idx = core::usize::MAX;
2458 let mut htlc_value_msat = 0;
2459 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2460 if htlc.htlc_id == htlc_id_arg {
2461 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2462 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2463 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2465 InboundHTLCState::Committed => {},
2466 InboundHTLCState::LocalRemoved(ref reason) => {
2467 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2469 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2470 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2472 return UpdateFulfillFetch::DuplicateClaim {};
2475 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2476 // Don't return in release mode here so that we can update channel_monitor
2480 htlc_value_msat = htlc.amount_msat;
2484 if pending_idx == core::usize::MAX {
2485 #[cfg(any(test, fuzzing))]
2486 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2487 // this is simply a duplicate claim, not previously failed and we lost funds.
2488 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2489 return UpdateFulfillFetch::DuplicateClaim {};
2492 // Now update local state:
2494 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2495 // can claim it even if the channel hits the chain before we see their next commitment.
2496 self.context.latest_monitor_update_id += 1;
2497 let monitor_update = ChannelMonitorUpdate {
2498 update_id: self.context.latest_monitor_update_id,
2499 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2500 payment_preimage: payment_preimage_arg.clone(),
2504 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2505 // Note that this condition is the same as the assertion in
2506 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2507 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2508 // do not get into this branch.
2509 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2510 match pending_update {
2511 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2512 if htlc_id_arg == htlc_id {
2513 // Make sure we don't leave latest_monitor_update_id incremented here:
2514 self.context.latest_monitor_update_id -= 1;
2515 #[cfg(any(test, fuzzing))]
2516 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2517 return UpdateFulfillFetch::DuplicateClaim {};
2520 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2521 if htlc_id_arg == htlc_id {
2522 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2523 // TODO: We may actually be able to switch to a fulfill here, though it's
2524 // rare enough it may not be worth the complexity burden.
2525 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2526 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2532 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2533 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2534 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2536 #[cfg(any(test, fuzzing))]
2537 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2538 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2540 #[cfg(any(test, fuzzing))]
2541 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2544 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2545 if let InboundHTLCState::Committed = htlc.state {
2547 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2548 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2550 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2551 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2554 UpdateFulfillFetch::NewClaim {
2557 msg: Some(msgs::UpdateFulfillHTLC {
2558 channel_id: self.context.channel_id(),
2559 htlc_id: htlc_id_arg,
2560 payment_preimage: payment_preimage_arg,
2565 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2566 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2567 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2568 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2569 // Even if we aren't supposed to let new monitor updates with commitment state
2570 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2571 // matter what. Sadly, to push a new monitor update which flies before others
2572 // already queued, we have to insert it into the pending queue and update the
2573 // update_ids of all the following monitors.
2574 if release_cs_monitor && msg.is_some() {
2575 let mut additional_update = self.build_commitment_no_status_check(logger);
2576 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
2577 // to be strictly increasing by one, so decrement it here.
2578 self.context.latest_monitor_update_id = monitor_update.update_id;
2579 monitor_update.updates.append(&mut additional_update.updates);
2581 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2582 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2583 monitor_update.update_id = new_mon_id;
2584 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2585 held_update.update.update_id += 1;
2588 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2589 let update = self.build_commitment_no_status_check(logger);
2590 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2596 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2597 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2599 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2603 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2604 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2605 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2606 /// before we fail backwards.
2608 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2609 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2610 /// [`ChannelError::Ignore`].
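///
/// A minimal illustrative sketch (`channel`, `htlc_id`, `err_packet`, and `logger` are
/// assumed to be in scope):
///
/// ```ignore
/// // The failure is queued in the holding cell; no update_fail_htlc message is returned here.
/// channel.queue_fail_htlc(htlc_id, err_packet, &logger)?;
/// ```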
2611 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2612 -> Result<(), ChannelError> where L::Target: Logger {
2613 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2614 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2617 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2618 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2619 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2620 /// before we fail backwards.
2622 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2623 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2624 /// [`ChannelError::Ignore`].
2625 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2626 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2627 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2628 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2630 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2632 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2633 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2634 // these, but for now we just have to treat them as normal.
2636 let mut pending_idx = core::usize::MAX;
2637 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2638 if htlc.htlc_id == htlc_id_arg {
2640 InboundHTLCState::Committed => {},
2641 InboundHTLCState::LocalRemoved(ref reason) => {
2642 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2644 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2649 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2650 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2656 if pending_idx == core::usize::MAX {
2657 #[cfg(any(test, fuzzing))]
2658 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2659 // is simply a duplicate fail, not previously failed and we failed-back too early.
2660 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2664 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2665 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2666 force_holding_cell = true;
2669 // Now update local state:
2670 if force_holding_cell {
2671 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2672 match pending_update {
2673 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2674 if htlc_id_arg == htlc_id {
2675 #[cfg(any(test, fuzzing))]
2676 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2680 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2681 if htlc_id_arg == htlc_id {
2682 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2683 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2689 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2690 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2691 htlc_id: htlc_id_arg,
2697 log_trace!(logger, "Failing HTLC ID {} back with an update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2699 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2700 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2703 Ok(Some(msgs::UpdateFailHTLC {
2704 channel_id: self.context.channel_id(),
2705 htlc_id: htlc_id_arg,
2710 // Message handlers:
2712 /// Handles a funding_signed message from the remote end.
2713 /// If this call is successful, broadcast the funding transaction (and not before!)
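///
/// A minimal illustrative sketch of the required ordering (`channel`, `msg`, `best_block`,
/// `signer_provider`, `logger`, and `funding_tx` are assumed to be in scope):
///
/// ```ignore
/// let monitor = channel.funding_signed(&msg, best_block, &signer_provider, &logger)?;
/// // Only once this returns Ok (and the returned ChannelMonitor has been persisted)
/// // should `funding_tx` be handed to the broadcaster.
/// ```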
2714 pub fn funding_signed<L: Deref>(
2715 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2716 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2720 if !self.context.is_outbound() {
2721 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2723 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2724 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2726 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2727 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2728 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2729 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2732 let funding_script = self.context.get_funding_redeemscript();
2734 let counterparty_keys = self.context.build_remote_transaction_keys();
2735 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2736 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2737 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2739 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2740 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2742 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2743 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2745 let trusted_tx = initial_commitment_tx.trust();
2746 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2747 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2748 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2749 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2750 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2754 let holder_commitment_tx = HolderCommitmentTransaction::new(
2755 initial_commitment_tx,
2758 &self.context.get_holder_pubkeys().funding_pubkey,
2759 self.context.counterparty_funding_pubkey()
2762 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2763 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2766 let funding_redeemscript = self.context.get_funding_redeemscript();
2767 let funding_txo = self.context.get_funding_txo().unwrap();
2768 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2769 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
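		// Per BOLT 3, this factor is XORed with the commitment number when the number is encoded
		// into the locktime/sequence fields of commitment transactions; the monitor uses it to
		// recover commitment numbers from transactions it sees on-chain.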
2770 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2771 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2772 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2773 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2774 shutdown_script, self.context.get_holder_selected_contest_delay(),
2775 &self.context.destination_script, (funding_txo, funding_txo_script),
2776 &self.context.channel_transaction_parameters,
2777 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2779 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2780 let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
2781 channel_monitor.provide_initial_counterparty_commitment_tx(
2782 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2783 self.context.cur_counterparty_commitment_transaction_number,
2784 self.context.counterparty_cur_commitment_point.unwrap(),
2785 counterparty_initial_commitment_tx.feerate_per_kw(),
2786 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2787 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
2789 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet to fail an update!
2790 if self.context.is_batch_funding() {
2791 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2793 self.context.channel_state = ChannelState::FundingSent as u32;
2795 self.context.cur_holder_commitment_transaction_number -= 1;
2796 self.context.cur_counterparty_commitment_transaction_number -= 1;
2798 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2800 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2801 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2805 /// Updates the state of the channel to indicate that all channels in the batch have received
2806 /// funding_signed and persisted their monitors.
2807 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2808 /// treated as a non-batch channel going forward.
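///
/// A minimal illustrative sketch (assumes `batch_channels` holds every channel funded by the
/// shared batch funding transaction):
///
/// ```ignore
/// // Once every channel in the batch has received funding_signed and persisted its monitor...
/// for channel in batch_channels.iter_mut() {
///     channel.set_batch_ready();
/// }
/// // ...the shared funding transaction may be broadcast.
/// ```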
2809 pub fn set_batch_ready(&mut self) {
2810 self.context.is_batch_funding = None;
2811 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2814 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2815 /// and the channel is now usable (and public), this may generate an announcement_signatures to send.
2817 pub fn channel_ready<NS: Deref, L: Deref>(
2818 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2819 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2820 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2822 NS::Target: NodeSigner,
2825 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2826 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2827 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2830 if let Some(scid_alias) = msg.short_channel_id_alias {
2831 if Some(scid_alias) != self.context.short_channel_id {
2832 // The scid alias provided can be used to route payments *from* our counterparty,
2833 // i.e. can be used for inbound payments and provided in invoices, but is not used
2834 // when routing outbound payments.
2835 self.context.latest_inbound_scid_alias = Some(scid_alias);
2839 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2841 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2842 // batch, but we can receive channel_ready messages.
2844 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2845 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
2847 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2848 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2849 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2850 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2851 self.context.update_time_counter += 1;
2852 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2853 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2854 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2855 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2857 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2858 // required, or they're sending a fresh SCID alias.
2859 let expected_point =
2860 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2861 // If they haven't ever sent an updated point, the point they send should match
2863 self.context.counterparty_cur_commitment_point
2864 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2865 // If we've advanced the commitment number once, the second commitment point is
2866 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2867 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2868 self.context.counterparty_prev_commitment_point
2870 // If they have sent updated points, channel_ready is always supposed to match
2871 // their "first" point, which we re-derive here.
2872 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2873 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2874 ).expect("We already advanced, so previous secret keys should have been validated already")))
2876 if expected_point != Some(msg.next_per_commitment_point) {
2877 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2881 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2884 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2885 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2887 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2889 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
2892 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2893 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2894 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2895 ) -> Result<(), ChannelError>
2896 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2897 FE::Target: FeeEstimator, L::Target: Logger,
2899 // We can't accept HTLCs sent after we've sent a shutdown.
2900 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2901 if local_sent_shutdown {
2902 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2904 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2905 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2906 if remote_sent_shutdown {
2907 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2909 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2910 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2912 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2913 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2915 if msg.amount_msat == 0 {
2916 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2918 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2919 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2922 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2923 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2924 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2925 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2927 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2928 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2931 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2932 // the reserve_satoshis we told them to always have as direct payment so that they lose
2933 // something if we punish them for broadcasting an old state).
2934 // Note that we don't really care about having a small/no to_remote output in our local
2935 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2936 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2937 // present in the next commitment transaction we send them (at least for fulfilled ones,
2938 // failed ones won't modify value_to_self).
2939 // Note that we will send HTLCs which another instance of rust-lightning would think
2940 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2941 // Channel state once they will not be present in the next received commitment transaction).
2943 let mut removed_outbound_total_msat = 0;
2944 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2945 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2946 removed_outbound_total_msat += htlc.amount_msat;
2947 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2948 removed_outbound_total_msat += htlc.amount_msat;
2952 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2953 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2956 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2957 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2958 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2960 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
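		// Illustrative numbers (assumed, not taken from any real channel): on a non-anchor
		// channel, with a dust_buffer_feerate of 2_530 sat/kW and a counterparty dust limit of
		// 546 sats, the timeout threshold is 2_530 * 663 / 1000 + 546 = 2_223 sats (663 being the
		// non-anchor HTLC-timeout weight), so any HTLC below ~2_223 sats counts towards our dust
		// exposure on the counterparty's commitment transaction.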
2961 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2962 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2963 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2964 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2965 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2966 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2970 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2971 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2972 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2973 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2974 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2975 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2976 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2980 let pending_value_to_self_msat =
2981 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2982 let pending_remote_value_msat =
2983 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2984 if pending_remote_value_msat < msg.amount_msat {
2985 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2988 // Check that the remote can afford to pay for this HTLC on-chain at the current
2989 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2991 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2992 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2993 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
2995 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2996 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3000 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3001 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3003 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3004 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3008 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3009 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3013 if !self.context.is_outbound() {
3014 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3015 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3016 // side, only on the sender's. Note that with anchor outputs we are no longer as
3017 // sensitive to fee spikes, so we don't apply the extra spike-buffer multiple in that case.
3018 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3019 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3020 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3021 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
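				// For example (illustrative numbers only): if the projected commitment fee including
				// the spike-buffer HTLC were 100_000 msat, a non-anchor channel would require
				// 100_000 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE msat of headroom in the check
				// below, while an anchor channel skips this multiplication entirely.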
3023 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3024 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3025 // the HTLC, i.e. its status is already set to failing.
3026 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3027 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3030 // Check that they won't violate our local required channel reserve by adding this HTLC.
3031 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3032 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3033 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3034 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3037 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3038 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3040 if msg.cltv_expiry >= 500000000 {
3041 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3044 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3045 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3046 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3050 // Now update local state:
3051 self.context.next_counterparty_htlc_id += 1;
3052 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3053 htlc_id: msg.htlc_id,
3054 amount_msat: msg.amount_msat,
3055 payment_hash: msg.payment_hash,
3056 cltv_expiry: msg.cltv_expiry,
3057 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3062 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3064 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3065 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3066 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3067 if htlc.htlc_id == htlc_id {
3068 let outcome = match check_preimage {
3069 None => fail_reason.into(),
3070 Some(payment_preimage) => {
3071 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3072 if payment_hash != htlc.payment_hash {
3073 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3075 OutboundHTLCOutcome::Success(Some(payment_preimage))
3079 OutboundHTLCState::LocalAnnounced(_) =>
3080 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3081 OutboundHTLCState::Committed => {
3082 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3084 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3085 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3090 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3093 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3094 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3095 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3097 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3098 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3101 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3104 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3105 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3106 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3108 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3109 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3112 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3116 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3117 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3118 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3120 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3121 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3124 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3128 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3129 where L::Target: Logger
3131 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3132 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3134 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3135 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3137 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3138 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3141 let funding_script = self.context.get_funding_redeemscript();
3143 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3145 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3146 let commitment_txid = {
3147 let trusted_tx = commitment_stats.tx.trust();
3148 let bitcoin_tx = trusted_tx.built_transaction();
3149 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3151 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3152 log_bytes!(msg.signature.serialize_compact()[..]),
3153 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3154 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3155 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3156 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3160 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3162 // If our counterparty updated the channel fee in this commitment transaction, check that
3163 // they can actually afford the new fee now.
3164 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3165 update_state == FeeUpdateState::RemoteAnnounced
3168 debug_assert!(!self.context.is_outbound());
3169 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3170 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3171 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3174 #[cfg(any(test, fuzzing))]
3176 if self.context.is_outbound() {
3177 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3178 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3179 if let Some(info) = projected_commit_tx_info {
3180 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3181 + self.context.holding_cell_htlc_updates.len();
3182 if info.total_pending_htlcs == total_pending_htlcs
3183 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3184 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3185 && info.feerate == self.context.feerate_per_kw {
3186 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3192 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3193 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3196 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3197 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3198 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3199 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3200 // backwards compatibility, we never use it in production. To provide test coverage here,
3201 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3202 #[allow(unused_assignments, unused_mut)]
3203 let mut separate_nondust_htlc_sources = false;
3204 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3205 use core::hash::{BuildHasher, Hasher};
3206 // Get a random value using the only std API to do so - the DefaultHasher
3207 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3208 separate_nondust_htlc_sources = rand_val % 2 == 0;
3211 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3212 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3213 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3214 if let Some(_) = htlc.transaction_output_index {
3215 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3216 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3217 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3219 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3220 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3221 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3222 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3223 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3224 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3225 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3226 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3228 if !separate_nondust_htlc_sources {
3229 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3232 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3234 if separate_nondust_htlc_sources {
3235 if let Some(source) = source_opt.take() {
3236 nondust_htlc_sources.push(source);
3239 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3242 let holder_commitment_tx = HolderCommitmentTransaction::new(
3243 commitment_stats.tx,
3245 msg.htlc_signatures.clone(),
3246 &self.context.get_holder_pubkeys().funding_pubkey,
3247 self.context.counterparty_funding_pubkey()
3250 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.preimages)
3251 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3253 // Update state now that we've passed all the can-fail calls...
3254 let mut need_commitment = false;
3255 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3256 if *update_state == FeeUpdateState::RemoteAnnounced {
3257 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3258 need_commitment = true;
3262 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3263 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3264 Some(forward_info.clone())
3266 if let Some(forward_info) = new_forward {
3267 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3268 &htlc.payment_hash, &self.context.channel_id);
3269 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3270 need_commitment = true;
3273 let mut claimed_htlcs = Vec::new();
3274 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3275 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3276 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3277 &htlc.payment_hash, &self.context.channel_id);
3278 // Grab the preimage, if it exists, instead of cloning
3279 let mut reason = OutboundHTLCOutcome::Success(None);
3280 mem::swap(outcome, &mut reason);
3281 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3282 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3283 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3284 // have a `Success(None)` reason. In this case we could forget some HTLC
3285 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3286 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3288 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3290 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3291 need_commitment = true;
3295 self.context.latest_monitor_update_id += 1;
3296 let mut monitor_update = ChannelMonitorUpdate {
3297 update_id: self.context.latest_monitor_update_id,
3298 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3299 commitment_tx: holder_commitment_tx,
3300 htlc_outputs: htlcs_and_sigs,
3302 nondust_htlc_sources,
3306 self.context.cur_holder_commitment_transaction_number -= 1;
3307 self.context.expecting_peer_commitment_signed = false;
3308 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3309 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3310 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3312 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3313 // In case we initially failed monitor updating without requiring a response, we need
3314 // to make sure the RAA gets sent first.
3315 self.context.monitor_pending_revoke_and_ack = true;
3316 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3317 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3318 // the corresponding HTLC status updates so that
3319 // get_last_commitment_update_for_send includes the right HTLCs.
3320 self.context.monitor_pending_commitment_signed = true;
3321 let mut additional_update = self.build_commitment_no_status_check(logger);
3322 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3323 // strictly increasing by one, so decrement it here.
3324 self.context.latest_monitor_update_id = monitor_update.update_id;
3325 monitor_update.updates.append(&mut additional_update.updates);
3327 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3328 &self.context.channel_id);
3329 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3332 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3333 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3334 // we'll send one right away when we get the revoke_and_ack when we
3335 // free_holding_cell_htlcs().
3336 let mut additional_update = self.build_commitment_no_status_check(logger);
3337 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3338 // strictly increasing by one, so decrement it here.
3339 self.context.latest_monitor_update_id = monitor_update.update_id;
3340 monitor_update.updates.append(&mut additional_update.updates);
3344 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3345 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3346 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3347 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3350 /// Public version of the below, checking relevant preconditions first.
3351 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3352 /// returns `(None, Vec::new())`.
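///
/// A minimal illustrative sketch (`channel`, `fee_estimator`, and `logger` are assumed to be
/// in scope):
///
/// ```ignore
/// let (monitor_update_opt, failed_htlcs) =
///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
/// // monitor_update_opt is None and failed_htlcs is empty when the channel is not in a
/// // state where the holding cell can be drained.
/// ```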
3353 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3354 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3355 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3356 where F::Target: FeeEstimator, L::Target: Logger
3358 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3359 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3360 self.free_holding_cell_htlcs(fee_estimator, logger)
3361 } else { (None, Vec::new()) }
3364 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3365 /// for our counterparty.
3366 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3367 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3368 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3369 where F::Target: FeeEstimator, L::Target: Logger
3371 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3372 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3373 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3374 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3376 let mut monitor_update = ChannelMonitorUpdate {
3377 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3378 updates: Vec::new(),
3381 let mut htlc_updates = Vec::new();
3382 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3383 let mut update_add_count = 0;
3384 let mut update_fulfill_count = 0;
3385 let mut update_fail_count = 0;
3386 let mut htlcs_to_fail = Vec::new();
3387 for htlc_update in htlc_updates.drain(..) {
3388 // Note that this *can* fail, though it should be due to rather-rare conditions on
3389 // fee races with adding too many outputs which push our total payments just over
3390 // the limit. In case it's less rare than I anticipate, we may want to revisit
3391 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3392 // to rebalance channels.
3393 match &htlc_update {
3394 &HTLCUpdateAwaitingACK::AddHTLC {
3395 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3396 skimmed_fee_msat, blinding_point, ..
3398 match self.send_htlc(
3399 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3400 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3402 Ok(_) => update_add_count += 1,
3405 ChannelError::Ignore(ref msg) => {
3406 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3407 // If we fail to send here, then this HTLC should
3408 // be failed backwards. Failing to send here
3409 // indicates that this HTLC may keep being put back
3410 // into the holding cell without ever being
3411 // successfully forwarded/failed/fulfilled, causing
3412 // our counterparty to eventually close on us.
3413 htlcs_to_fail.push((source.clone(), *payment_hash));
3416 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3422 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3423 // If an HTLC claim was previously added to the holding cell (via
3424 // `get_update_fulfill_htlc`), then generating the claim message itself must
3425 // not fail - any in between attempts to claim the HTLC will have resulted
3426 // in it hitting the holding cell again and we cannot change the state of a
3427 // holding cell HTLC from fulfill to anything else.
3428 let mut additional_monitor_update =
3429 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3430 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3431 { monitor_update } else { unreachable!() };
3432 update_fulfill_count += 1;
3433 monitor_update.updates.append(&mut additional_monitor_update.updates);
3435 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3436 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3437 Ok(update_fail_msg_option) => {
3438 // If an HTLC failure was previously added to the holding cell (via
3439 // `queue_fail_htlc`) then generating the fail message itself must
3440 // not fail - we should never end up in a state where we double-fail
3441 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3442 // for a full revocation before failing.
3443 debug_assert!(update_fail_msg_option.is_some());
3444 update_fail_count += 1;
3447 if let ChannelError::Ignore(_) = e {}
3449 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3456 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3457 return (None, htlcs_to_fail);
3459 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3460 self.send_update_fee(feerate, false, fee_estimator, logger)
3465 let mut additional_update = self.build_commitment_no_status_check(logger);
3466 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3467 // but we want them to be strictly increasing by one, so reset it here.
3468 self.context.latest_monitor_update_id = monitor_update.update_id;
3469 monitor_update.updates.append(&mut additional_update.updates);
3471 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3472 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3473 update_add_count, update_fulfill_count, update_fail_count);
3475 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3476 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3482 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3483 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3484 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3485 /// generating an appropriate error *after* the channel state has been updated based on the
3486 /// revoke_and_ack message.
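///
/// A minimal illustrative sketch (`channel`, `msg`, `fee_estimator`, and `logger` are assumed
/// to be in scope):
///
/// ```ignore
/// let (htlcs_to_fail, monitor_update_opt) =
///     channel.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
/// // Any HTLCs in htlcs_to_fail must be failed backwards, and the monitor update (if any)
/// // must be applied before generating further commitment updates.
/// ```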
3487 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3488 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3489 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3490 where F::Target: FeeEstimator, L::Target: Logger,
3492 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3493 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3495 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3496 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3498 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3499 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3502 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3504 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3505 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3506 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3510 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3511 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3512 // haven't given them a new commitment transaction to broadcast). We should probably
3513 // take advantage of this by updating our channel monitor, sending them an error, and
3514 // waiting for them to broadcast their latest (now-revoked) claim. But that would be a
3515 // lot of work, and there's some chance this is all a misunderstanding anyway.
3516 // We have to do *something*, though, since our signer may get mad at us for otherwise
3517 // jumping a remote commitment number, so best to just force-close and move on.
3518 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3521 #[cfg(any(test, fuzzing))]
3523 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3524 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3527 match &self.context.holder_signer {
3528 ChannelSignerType::Ecdsa(ecdsa) => {
3529 ecdsa.validate_counterparty_revocation(
3530 self.context.cur_counterparty_commitment_transaction_number + 1,
3532 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3534 // TODO (taproot|arik)
3539 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3540 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
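// `provide_secret` folds the revealed secret into BOLT 3's compact storage scheme (roughly,
// at most 49 (secret, index) pairs are kept, from which all older secrets can be re-derived)
// and errors if the new secret is inconsistent with secrets we previously derived.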
3541 self.context.latest_monitor_update_id += 1;
3542 let mut monitor_update = ChannelMonitorUpdate {
3543 update_id: self.context.latest_monitor_update_id,
3544 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3545 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3546 secret: msg.per_commitment_secret,
3550 // Update state now that we've passed all the can-fail calls...
3551 // (Note that we may still fail to generate the new commitment_signed message, but that's
3552 // OK: we step the channel here and *then*, if the new generation fails, we can fail the
3553 // channel based on that; stepping the state here is safe either way.)
3554 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3555 self.context.sent_message_awaiting_response = None;
3556 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3557 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3558 self.context.cur_counterparty_commitment_transaction_number -= 1;
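// Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER, so decrementing
// here advances us to the counterparty's next commitment state now that the previous one
// has been revoked.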
3560 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3561 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3564 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3565 let mut to_forward_infos = Vec::new();
3566 let mut revoked_htlcs = Vec::new();
3567 let mut finalized_claimed_htlcs = Vec::new();
3568 let mut update_fail_htlcs = Vec::new();
3569 let mut update_fail_malformed_htlcs = Vec::new();
3570 let mut require_commitment = false;
3571 let mut value_to_self_msat_diff: i64 = 0;
3574 // Take references explicitly so that we can hold multiple references to self.context.
3575 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3576 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3577 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3579 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3580 pending_inbound_htlcs.retain(|htlc| {
3581 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3582 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3583 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3584 value_to_self_msat_diff += htlc.amount_msat as i64;
3586 *expecting_peer_commitment_signed = true;
3590 pending_outbound_htlcs.retain(|htlc| {
3591 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3592 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3593 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3594 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3596 finalized_claimed_htlcs.push(htlc.source.clone());
3597 // They fulfilled, so we sent them money
3598 value_to_self_msat_diff -= htlc.amount_msat as i64;
3603 for htlc in pending_inbound_htlcs.iter_mut() {
3604 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3606 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3610 let mut state = InboundHTLCState::Committed;
3611 mem::swap(&mut state, &mut htlc.state);
3613 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3614 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3615 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3616 require_commitment = true;
3617 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3618 match forward_info {
3619 PendingHTLCStatus::Fail(fail_msg) => {
3620 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3621 require_commitment = true;
3623 HTLCFailureMsg::Relay(msg) => {
3624 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3625 update_fail_htlcs.push(msg)
3627 HTLCFailureMsg::Malformed(msg) => {
3628 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3629 update_fail_malformed_htlcs.push(msg)
3633 PendingHTLCStatus::Forward(forward_info) => {
3634 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3635 to_forward_infos.push((forward_info, htlc.htlc_id));
3636 htlc.state = InboundHTLCState::Committed;
3642 for htlc in pending_outbound_htlcs.iter_mut() {
3643 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3644 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3645 htlc.state = OutboundHTLCState::Committed;
3646 *expecting_peer_commitment_signed = true;
3648 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3649 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3650 // Grab the preimage, if it exists, instead of cloning
3651 let mut reason = OutboundHTLCOutcome::Success(None);
3652 mem::swap(outcome, &mut reason);
3653 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3654 require_commitment = true;
3658 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
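// value_to_self_msat_diff accumulated +amount_msat for each inbound HTLC we claimed and
// -amount_msat for each outbound HTLC the counterparty fulfilled, so applying it here keeps
// our local balance consistent with the HTLCs that were just irrevocably resolved above.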
3660 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3661 match update_state {
3662 FeeUpdateState::Outbound => {
3663 debug_assert!(self.context.is_outbound());
3664 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3665 self.context.feerate_per_kw = feerate;
3666 self.context.pending_update_fee = None;
3667 self.context.expecting_peer_commitment_signed = true;
3669 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3670 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3671 debug_assert!(!self.context.is_outbound());
3672 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3673 require_commitment = true;
3674 self.context.feerate_per_kw = feerate;
3675 self.context.pending_update_fee = None;
3680 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3681 let release_state_str =
3682 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3683 macro_rules! return_with_htlcs_to_fail {
3684 ($htlcs_to_fail: expr) => {
3685 if !release_monitor {
3686 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3687 update: monitor_update,
3689 return Ok(($htlcs_to_fail, None));
3691 return Ok(($htlcs_to_fail, Some(monitor_update)));
3696 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3697 // We can't actually generate a new commitment transaction (including by freeing holding
3698 // cells) while we can't update the monitor, so we just return what we have.
3699 if require_commitment {
3700 self.context.monitor_pending_commitment_signed = true;
3701 // When the monitor updating is restored we'll call
3702 // get_last_commitment_update_for_send(), which does not update state, but we're
3703 // definitely now awaiting a remote revoke before we can step forward any more, so set it here.
3705 let mut additional_update = self.build_commitment_no_status_check(logger);
3706 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3707 // strictly increasing by one, so reset it here.
3708 self.context.latest_monitor_update_id = monitor_update.update_id;
3709 monitor_update.updates.append(&mut additional_update.updates);
3711 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3712 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3713 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3714 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3715 return_with_htlcs_to_fail!(Vec::new());
3718 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3719 (Some(mut additional_update), htlcs_to_fail) => {
3720 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them to be
3721 // strictly increasing by one, so reset it here.
3722 self.context.latest_monitor_update_id = monitor_update.update_id;
3723 monitor_update.updates.append(&mut additional_update.updates);
3725 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3726 &self.context.channel_id(), release_state_str);
3728 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3729 return_with_htlcs_to_fail!(htlcs_to_fail);
3731 (None, htlcs_to_fail) => {
3732 if require_commitment {
3733 let mut additional_update = self.build_commitment_no_status_check(logger);
3735 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3736 // strictly increasing by one, so reset it here.
3737 self.context.latest_monitor_update_id = monitor_update.update_id;
3738 monitor_update.updates.append(&mut additional_update.updates);
3740 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3741 &self.context.channel_id(),
3742 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3745 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3746 return_with_htlcs_to_fail!(htlcs_to_fail);
3748 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3749 &self.context.channel_id(), release_state_str);
3751 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3752 return_with_htlcs_to_fail!(htlcs_to_fail);
3758 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3759 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3760 /// commitment update.
3761 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3762 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3763 where F::Target: FeeEstimator, L::Target: Logger
3765 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3766 assert!(msg_opt.is_none(), "We forced holding cell?");
3769 /// Adds a pending update to this channel. See the doc for send_htlc for
3770 /// further details on the optionality of the return value.
3771 /// If our balance is too low to cover the cost of the next commitment transaction at the
3772 /// new feerate, the update is cancelled.
3774 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3775 /// [`Channel`] if `force_holding_cell` is false.
3776 fn send_update_fee<F: Deref, L: Deref>(
3777 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3778 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3779 ) -> Option<msgs::UpdateFee>
3780 where F::Target: FeeEstimator, L::Target: Logger
3782 if !self.context.is_outbound() {
3783 panic!("Cannot send fee from inbound channel");
3785 if !self.context.is_usable() {
3786 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3788 if !self.context.is_live() {
3789 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3792 // Before proposing a feerate update, check that we can actually afford the new fee.
3793 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3794 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3795 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3796 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3797 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
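// Roughly, commit_tx_fee_sat(feerate, n) = feerate_per_kw * commitment_tx_weight(n) / 1000,
// where the weight grows with each non-dust HTLC output. Here we budget for the HTLCs
// currently sitting in the holding cell plus CONCURRENT_INBOUND_HTLC_FEE_BUFFER potential
// additional inbound HTLCs the peer may add before they see our update_fee.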
3798 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3799 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3800 //TODO: auto-close after a number of failures?
3801 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3805 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust thresholds at the proposed `feerate_per_kw`.
3806 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3807 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3808 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3809 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3810 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3813 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3814 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3818 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3819 force_holding_cell = true;
3822 if force_holding_cell {
3823 self.context.holding_cell_update_fee = Some(feerate_per_kw);
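// The fee update sits in the holding cell until free_holding_cell_htlcs runs (e.g. via
// maybe_free_holding_cell_htlcs), at which point send_update_fee is retried without forcing
// the holding cell; no update_fee message is produced here.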
3827 debug_assert!(self.context.pending_update_fee.is_none());
3828 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3830 Some(msgs::UpdateFee {
3831 channel_id: self.context.channel_id,
3836 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3837 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be re-sent.
3839 /// No further message handling calls may be made until a channel_reestablish dance has completed.
3841 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3842 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3843 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3844 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3848 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3849 // While the below code should be idempotent, it's simpler to just return early, as
3850 // redundant disconnect events can fire, though they should be rare.
3854 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3855 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3858 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3859 // will be retransmitted.
3860 self.context.last_sent_closing_fee = None;
3861 self.context.pending_counterparty_closing_signed = None;
3862 self.context.closing_fee_limits = None;
3864 let mut inbound_drop_count = 0;
3865 self.context.pending_inbound_htlcs.retain(|htlc| {
3867 InboundHTLCState::RemoteAnnounced(_) => {
3868 // They sent us an update_add_htlc but we never got the commitment_signed.
3869 // We'll tell them what commitment_signed we're expecting next and they'll drop
3870 // this HTLC accordingly
3871 inbound_drop_count += 1;
3874 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3875 // We received a commitment_signed updating this HTLC and (at least hopefully)
3876 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3877 // in response to it yet, so don't touch it.
3880 InboundHTLCState::Committed => true,
3881 InboundHTLCState::LocalRemoved(_) => {
3882 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3883 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3884 // (that we missed). Keep this around for now and if they tell us they missed
3885 // the commitment_signed we can re-transmit the update then.
3890 self.context.next_counterparty_htlc_id -= inbound_drop_count;
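// The dropped inbound HTLCs were never committed, so we roll back our expectation of the
// counterparty's next HTLC id; they will reuse the same ids when re-announcing those HTLCs
// after reconnection.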
3892 if let Some((_, update_state)) = self.context.pending_update_fee {
3893 if update_state == FeeUpdateState::RemoteAnnounced {
3894 debug_assert!(!self.context.is_outbound());
3895 self.context.pending_update_fee = None;
3899 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3900 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3901 // They sent us an update to remove this but haven't yet sent the corresponding
3902 // commitment_signed, we need to move it back to Committed and they can re-send
3903 // the update upon reconnection.
3904 htlc.state = OutboundHTLCState::Committed;
3908 self.context.sent_message_awaiting_response = None;
3910 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3911 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3915 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3916 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3917 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3918 /// update completes (potentially immediately).
3919 /// The messages which were generated with the monitor update must *not* have been sent to the
3920 /// remote end, and must instead have been dropped. They will be regenerated when
3921 /// [`Self::monitor_updating_restored`] is called.
3923 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3924 /// [`chain::Watch`]: crate::chain::Watch
3925 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3926 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3927 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3928 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3929 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3931 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3932 self.context.monitor_pending_commitment_signed |= resend_commitment;
3933 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3934 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3935 self.context.monitor_pending_failures.append(&mut pending_fails);
3936 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3937 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3940 /// Indicates that the latest ChannelMonitor update has been committed by the client
3941 /// successfully and we should restore normal operation. Returns messages which should be sent
3942 /// to the remote side.
3943 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3944 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3945 user_config: &UserConfig, best_block_height: u32
3946 ) -> MonitorRestoreUpdates
3949 NS::Target: NodeSigner
3951 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3952 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3954 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3955 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3956 // first received the funding_signed.
3957 let mut funding_broadcastable =
3958 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3959 self.context.funding_transaction.take()
3961 // That said, if the funding transaction is already confirmed (ie we're active with a
3962 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3963 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3964 funding_broadcastable = None;
3967 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3968 // (and we assume the user never directly broadcasts the funding transaction and waits for
3969 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3970 // * an inbound channel that failed to persist the monitor on funding_created and we got
3971 // the funding transaction confirmed before the monitor was persisted, or
3972 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3973 let channel_ready = if self.context.monitor_pending_channel_ready {
3974 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3975 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3976 self.context.monitor_pending_channel_ready = false;
3977 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3978 Some(msgs::ChannelReady {
3979 channel_id: self.context.channel_id(),
3980 next_per_commitment_point,
3981 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3985 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
3987 let mut accepted_htlcs = Vec::new();
3988 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3989 let mut failed_htlcs = Vec::new();
3990 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3991 let mut finalized_claimed_htlcs = Vec::new();
3992 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
3994 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
3995 self.context.monitor_pending_revoke_and_ack = false;
3996 self.context.monitor_pending_commitment_signed = false;
3997 return MonitorRestoreUpdates {
3998 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
3999 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4003 let raa = if self.context.monitor_pending_revoke_and_ack {
4004 Some(self.get_last_revoke_and_ack())
4006 let commitment_update = if self.context.monitor_pending_commitment_signed {
4007 self.get_last_commitment_update_for_send(logger).ok()
4009 if commitment_update.is_some() {
4010 self.mark_awaiting_response();
4013 self.context.monitor_pending_revoke_and_ack = false;
4014 self.context.monitor_pending_commitment_signed = false;
4015 let order = self.context.resend_order.clone();
4016 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4017 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4018 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4019 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4020 MonitorRestoreUpdates {
4021 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4025 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4026 where F::Target: FeeEstimator, L::Target: Logger
4028 if self.context.is_outbound() {
4029 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4031 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4032 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4034 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4036 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4037 self.context.update_time_counter += 1;
4038 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4039 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4040 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4041 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4042 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4043 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4044 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4045 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4046 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4047 msg.feerate_per_kw, holder_tx_dust_exposure)));
4049 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4050 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4051 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4057 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4060 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4061 let commitment_update = if self.context.signer_pending_commitment_update {
4062 self.get_last_commitment_update_for_send(logger).ok()
4064 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4065 self.context.get_funding_signed_msg(logger).1
4067 let channel_ready = if funding_signed.is_some() {
4068 self.check_get_channel_ready(0)
4070 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4071 self.context.get_funding_created_msg(logger)
4074 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4075 if commitment_update.is_some() { "a" } else { "no" },
4076 if funding_signed.is_some() { "a" } else { "no" },
4077 if funding_created.is_some() { "a" } else { "no" },
4078 if channel_ready.is_some() { "a" } else { "no" });
4080 SignerResumeUpdates {
4088 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4089 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4090 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
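// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER, so `+ 2` here releases the
// secret for an older state, i.e. the previous (now-revoked) holder commitment, while the
// `next_per_commitment_point` above covers the next commitment we expect to be signed.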
4091 msgs::RevokeAndACK {
4092 channel_id: self.context.channel_id,
4093 per_commitment_secret,
4094 next_per_commitment_point,
4096 next_local_nonce: None,
4100 /// Gets the last commitment update for immediate sending to our peer.
4101 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4102 let mut update_add_htlcs = Vec::new();
4103 let mut update_fulfill_htlcs = Vec::new();
4104 let mut update_fail_htlcs = Vec::new();
4105 let mut update_fail_malformed_htlcs = Vec::new();
4107 for htlc in self.context.pending_outbound_htlcs.iter() {
4108 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4109 update_add_htlcs.push(msgs::UpdateAddHTLC {
4110 channel_id: self.context.channel_id(),
4111 htlc_id: htlc.htlc_id,
4112 amount_msat: htlc.amount_msat,
4113 payment_hash: htlc.payment_hash,
4114 cltv_expiry: htlc.cltv_expiry,
4115 onion_routing_packet: (**onion_packet).clone(),
4116 skimmed_fee_msat: htlc.skimmed_fee_msat,
4117 blinding_point: htlc.blinding_point,
4122 for htlc in self.context.pending_inbound_htlcs.iter() {
4123 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4125 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4126 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4127 channel_id: self.context.channel_id(),
4128 htlc_id: htlc.htlc_id,
4129 reason: err_packet.clone()
4132 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4133 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4134 channel_id: self.context.channel_id(),
4135 htlc_id: htlc.htlc_id,
4136 sha256_of_onion: sha256_of_onion.clone(),
4137 failure_code: failure_code.clone(),
4140 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4141 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4142 channel_id: self.context.channel_id(),
4143 htlc_id: htlc.htlc_id,
4144 payment_preimage: payment_preimage.clone(),
4151 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4152 Some(msgs::UpdateFee {
4153 channel_id: self.context.channel_id(),
4154 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4158 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4159 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4160 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4161 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4162 if self.context.signer_pending_commitment_update {
4163 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4164 self.context.signer_pending_commitment_update = false;
4168 if !self.context.signer_pending_commitment_update {
4169 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4170 self.context.signer_pending_commitment_update = true;
4174 Ok(msgs::CommitmentUpdate {
4175 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4180 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4181 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4182 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4183 assert!(self.context.shutdown_scriptpubkey.is_some());
4184 Some(msgs::Shutdown {
4185 channel_id: self.context.channel_id,
4186 scriptpubkey: self.get_closing_scriptpubkey(),
4191 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4192 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4194 /// Some links printed in log lines are included here to check them during build (when run with
4195 /// `cargo doc --document-private-items`):
4196 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4197 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4198 pub fn channel_reestablish<L: Deref, NS: Deref>(
4199 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4200 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4201 ) -> Result<ReestablishResponses, ChannelError>
4204 NS::Target: NodeSigner
4206 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4207 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4208 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4209 // just close here instead of trying to recover.
4210 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4213 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4214 msg.next_local_commitment_number == 0 {
4215 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4218 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
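// Our internal commitment numbers count down from INITIAL_COMMITMENT_NUMBER while
// channel_reestablish counts up, so this converts our holder commitment number into the
// count-up form used for comparison against msg.next_remote_commitment_number below.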
4219 if msg.next_remote_commitment_number > 0 {
4220 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4221 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4222 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4223 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4224 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4226 if msg.next_remote_commitment_number > our_commitment_transaction {
4227 macro_rules! log_and_panic {
4228 ($err_msg: expr) => {
4229 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4230 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4233 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4234 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4235 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4236 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4237 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4238 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4239 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4240 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4244 // Before we change the state of the channel, we check whether the peer is sending a very old
4245 // commitment transaction number; if so, we send a warning message.
4246 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4247 return Err(ChannelError::Warn(format!(
4248 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4249 msg.next_remote_commitment_number,
4250 our_commitment_transaction
4254 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4255 // remaining cases either succeed or ErrorMessage-fail).
4256 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4257 self.context.sent_message_awaiting_response = None;
4259 let shutdown_msg = self.get_outbound_shutdown();
4261 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4263 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4264 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4265 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4266 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4267 if msg.next_remote_commitment_number != 0 {
4268 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4270 // Short circuit the whole handler as there is nothing we can resend them
4271 return Ok(ReestablishResponses {
4272 channel_ready: None,
4273 raa: None, commitment_update: None,
4274 order: RAACommitmentOrder::CommitmentFirst,
4275 shutdown_msg, announcement_sigs,
4279 // We have OurChannelReady set!
4280 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4281 return Ok(ReestablishResponses {
4282 channel_ready: Some(msgs::ChannelReady {
4283 channel_id: self.context.channel_id(),
4284 next_per_commitment_point,
4285 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4287 raa: None, commitment_update: None,
4288 order: RAACommitmentOrder::CommitmentFirst,
4289 shutdown_msg, announcement_sigs,
4293 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4294 // Remote isn't waiting on any RevokeAndACK from us!
4295 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4297 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4298 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4299 self.context.monitor_pending_revoke_and_ack = true;
4302 Some(self.get_last_revoke_and_ack())
4305 debug_assert!(false, "All values should have been handled in the four cases above");
4306 return Err(ChannelError::Close(format!(
4307 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4308 msg.next_remote_commitment_number,
4309 our_commitment_transaction
4313 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4314 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4315 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4316 // the corresponding revoke_and_ack back yet.
4317 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4318 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4319 self.mark_awaiting_response();
4321 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4323 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4324 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4325 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4326 Some(msgs::ChannelReady {
4327 channel_id: self.context.channel_id(),
4328 next_per_commitment_point,
4329 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4333 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4334 if required_revoke.is_some() {
4335 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4337 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4340 Ok(ReestablishResponses {
4341 channel_ready, shutdown_msg, announcement_sigs,
4342 raa: required_revoke,
4343 commitment_update: None,
4344 order: self.context.resend_order.clone(),
4346 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4347 if required_revoke.is_some() {
4348 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4350 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4353 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4354 self.context.monitor_pending_commitment_signed = true;
4355 Ok(ReestablishResponses {
4356 channel_ready, shutdown_msg, announcement_sigs,
4357 commitment_update: None, raa: None,
4358 order: self.context.resend_order.clone(),
4361 Ok(ReestablishResponses {
4362 channel_ready, shutdown_msg, announcement_sigs,
4363 raa: required_revoke,
4364 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4365 order: self.context.resend_order.clone(),
4368 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4369 Err(ChannelError::Close(format!(
4370 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4371 msg.next_local_commitment_number,
4372 next_counterparty_commitment_number,
4375 Err(ChannelError::Close(format!(
4376 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4377 msg.next_local_commitment_number,
4378 next_counterparty_commitment_number,
4383 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4384 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4385 /// at which point they will be recalculated.
4386 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4388 where F::Target: FeeEstimator
4390 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4392 // Propose a range from our ChannelCloseMinimum feerate to our NonAnchorChannelFee feerate plus our
4393 // force_close_avoidance_max_fee_satoshis.
4394 // If we fail to come to consensus, we'll have to force-close.
4395 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4396 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4397 // that we don't expect to need fee bumping
4398 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4399 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4401 // The spec requires that (when the channel does not have anchors) we only send absolute
4402 // channel fees no greater than the absolute channel fee on the current commitment
4403 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4404 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4405 // some force-closure by old nodes, but we wanted to close the channel anyway.
4407 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4408 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4409 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4410 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4413 // Note that technically we could end up with a lower minimum fee if one side's balance is
4414 // below our dust limit, causing the output to disappear. We don't bother handling this
4415 // case, however, as this should only happen if a channel is closed before any (material)
4416 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4417 // come to consensus with our counterparty on appropriate fees, however it should be a
4418 // relatively rare case. We can revisit this later, though note that in order to determine
4419 // if the funder's output is dust we have to know the absolute fee we're going to use.
4420 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4421 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
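// The fee is simply the feerate (sats per 1000 weight units) times the estimated closing tx
// weight. For example, at 2_530 sat/kWU and a ~700 WU closing transaction this comes to
// 2_530 * 700 / 1000 = 1_771 sats.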
4422 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4423 // We always add force_close_avoidance_max_fee_satoshis to our normal
4424 // feerate-calculated fee, but allow the max to be overridden if we're using a
4425 // target feerate-calculated fee.
4426 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4427 proposed_max_feerate as u64 * tx_weight / 1000)
4429 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
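// For inbound channels the counterparty (as funder) pays the closing fee, so our "max" is
// simply their entire balance: the channel value minus our balance rounded up to the nearest
// satoshi ((x + 999) / 1000 is a ceiling msat-to-sat conversion).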
4432 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4433 self.context.closing_fee_limits.clone().unwrap()
4436 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4437 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4438 /// this point if we're the funder we should send the initial closing_signed, and in any case
4439 /// shutdown should complete within a reasonable timeframe.
4440 fn closing_negotiation_ready(&self) -> bool {
4441 self.context.closing_negotiation_ready()
4444 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4445 /// an Err if no progress is being made and the channel should be force-closed instead.
4446 /// Should be called on a one-minute timer.
4447 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4448 if self.closing_negotiation_ready() {
4449 if self.context.closing_signed_in_flight {
4450 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4452 self.context.closing_signed_in_flight = true;
4458 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4459 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4460 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4461 where F::Target: FeeEstimator, L::Target: Logger
4463 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4464 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4465 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4466 // that closing_negotiation_ready checks this case (as well as a few others).
4467 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4468 return Ok((None, None, None));
4471 if !self.context.is_outbound() {
4472 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4473 return self.closing_signed(fee_estimator, &msg);
4475 return Ok((None, None, None));
4478 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4479 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4480 if self.context.expecting_peer_commitment_signed {
4481 return Ok((None, None, None));
4484 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4486 assert!(self.context.shutdown_scriptpubkey.is_some());
4487 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4488 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4489 our_min_fee, our_max_fee, total_fee_satoshis);
4491 match &self.context.holder_signer {
4492 ChannelSignerType::Ecdsa(ecdsa) => {
4494 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4495 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4497 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4498 Ok((Some(msgs::ClosingSigned {
4499 channel_id: self.context.channel_id,
4500 fee_satoshis: total_fee_satoshis,
4502 fee_range: Some(msgs::ClosingSignedFeeRange {
4503 min_fee_satoshis: our_min_fee,
4504 max_fee_satoshis: our_max_fee,
4508 // TODO (taproot|arik)
4514 // Marks a channel as waiting for a response from the counterparty. If it's not received
4515 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnection.
4517 fn mark_awaiting_response(&mut self) {
4518 self.context.sent_message_awaiting_response = Some(0);
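// The counter starts at zero and is bumped once per timer tick in
// `should_disconnect_peer_awaiting_response` below; once it reaches
// DISCONNECT_PEER_AWAITING_RESPONSE_TICKS we signal that the peer should be disconnected.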
4521 /// Determines whether we should disconnect the counterparty due to not receiving a response
4522 /// within our expected timeframe.
4524 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4525 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4526 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4529 // Don't disconnect when we're not waiting on a response.
4532 *ticks_elapsed += 1;
4533 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4537 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4538 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4540 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4541 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4543 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4544 // The spec says we should fail the connection, not the channel, but that's nonsense: there
4545 // are plenty of reasons you may want to fail a channel pre-funding, and the spec says you
4546 // can do that via an error message without getting a connection failure anyway...
4547 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4549 for htlc in self.context.pending_inbound_htlcs.iter() {
4550 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4551 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4554 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4556 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4557 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4560 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4561 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4562 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4565 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4568 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4569 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4570 // any further commitment updates after we set LocalShutdownSent.
4571 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4573 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4576 assert!(send_shutdown);
4577 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4578 Ok(scriptpubkey) => scriptpubkey,
4579 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4581 if !shutdown_scriptpubkey.is_compatible(their_features) {
4582 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4584 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4589 // From here on out, we may not fail!
4591 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4592 self.context.update_time_counter += 1;
4594 let monitor_update = if update_shutdown_script {
4595 self.context.latest_monitor_update_id += 1;
4596 let monitor_update = ChannelMonitorUpdate {
4597 update_id: self.context.latest_monitor_update_id,
4598 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4599 scriptpubkey: self.get_closing_scriptpubkey(),
4602 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4603 self.push_ret_blockable_mon_update(monitor_update)
4605 let shutdown = if send_shutdown {
4606 Some(msgs::Shutdown {
4607 channel_id: self.context.channel_id,
4608 scriptpubkey: self.get_closing_scriptpubkey(),
4612 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4613 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4614 // cell HTLCs and return them to fail the payment.
4615 self.context.holding_cell_update_fee = None;
4616 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4617 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4619 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4620 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4627 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4628 self.context.update_time_counter += 1;
4630 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4633 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4634 let mut tx = closing_tx.trust().built_transaction().clone();
4636 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4638 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4639 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4640 let mut holder_sig = sig.serialize_der().to_vec();
4641 holder_sig.push(EcdsaSighashType::All as u8);
4642 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4643 cp_sig.push(EcdsaSighashType::All as u8);
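// Per BOLT 3 the funding redeemscript orders the two funding pubkeys lexicographically, so
// the signatures below must be pushed onto the witness in that same order for
// OP_CHECKMULTISIG to match them up; each DER signature above also gets a trailing
// SIGHASH_ALL byte.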
4644 if funding_key[..] < counterparty_funding_key[..] {
4645 tx.input[0].witness.push(holder_sig);
4646 tx.input[0].witness.push(cp_sig);
4648 tx.input[0].witness.push(cp_sig);
4649 tx.input[0].witness.push(holder_sig);
4652 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4656 pub fn closing_signed<F: Deref>(
4657 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4658 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4659 where F::Target: FeeEstimator
4661 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4662 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4664 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4665 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4667 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4668 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4670 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4671 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4674 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4675 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4678 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4679 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4680 return Ok((None, None, None));
4683 let funding_redeemscript = self.context.get_funding_redeemscript();
4684 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4685 if used_total_fee != msg.fee_satoshis {
4686 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4688 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4690 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4693 // The remote end may have decided to revoke their output due to inconsistent dust
4694 // limits, so check for that case by re-checking the signature here.
4695 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4696 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4697 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4701 for outp in closing_tx.trust().built_transaction().output.iter() {
4702 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4703 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4707 assert!(self.context.shutdown_scriptpubkey.is_some());
4708 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4709 if last_fee == msg.fee_satoshis {
4710 let shutdown_result = ShutdownResult {
4711 monitor_update: None,
4712 dropped_outbound_htlcs: Vec::new(),
4713 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4714 channel_id: self.context.channel_id,
4715 counterparty_node_id: self.context.counterparty_node_id,
4717 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4718 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4719 self.context.update_time_counter += 1;
4720 return Ok((None, Some(tx), Some(shutdown_result)));
4724 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
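// With the modern fee_range negotiation below, the two advertised ranges must overlap; if the
// counterparty is the one paying the fee we pick the highest fee inside that overlap, and
// otherwise we only accept a proposal that falls within the range we previously sent.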
4726 macro_rules! propose_fee {
4727 ($new_fee: expr) => {
4728 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4729 (closing_tx, $new_fee)
4731 self.build_closing_transaction($new_fee, false)
4734 return match &self.context.holder_signer {
4735 ChannelSignerType::Ecdsa(ecdsa) => {
4737 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4738 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4739 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4740 let shutdown_result = ShutdownResult {
4741 monitor_update: None,
4742 dropped_outbound_htlcs: Vec::new(),
4743 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4744 channel_id: self.context.channel_id,
4745 counterparty_node_id: self.context.counterparty_node_id,
4747 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4748 self.context.update_time_counter += 1;
4749 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4750 (Some(tx), Some(shutdown_result))
4755 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4756 Ok((Some(msgs::ClosingSigned {
4757 channel_id: self.context.channel_id,
4758 fee_satoshis: used_fee,
4760 fee_range: Some(msgs::ClosingSignedFeeRange {
4761 min_fee_satoshis: our_min_fee,
4762 max_fee_satoshis: our_max_fee,
4764 }), signed_tx, shutdown_result))
4766 // TODO (taproot|arik)
4773 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4774 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4775 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4777 if max_fee_satoshis < our_min_fee {
4778 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4780 if min_fee_satoshis > our_max_fee {
4781 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4784 if !self.context.is_outbound() {
4785 // They have to pay, so pick the highest fee in the overlapping range.
4786 // We should never set an upper bound aside from their full balance
4787 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
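// Our max fee is everything except our own balance, with the msat balance rounded up to a
// whole satoshi. Purely illustrative arithmetic: with value_to_self_msat = 1_500,
// (1_500 + 999) / 1000 = 2 sats stay reserved for us, so the closing fee can consume at most
// channel_value_satoshis - 2.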
4788 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4790 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4791 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4792 msg.fee_satoshis, our_min_fee, our_max_fee)));
4794 // The proposed fee is in our acceptable range, accept it and broadcast!
4795 propose_fee!(msg.fee_satoshis);
4798 // Old fee style negotiation. We don't bother to enforce whether they are complying
4799 // with the "making progress" requirements, we just comply and hope for the best.
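// Roughly: if their latest proposal is acceptable relative to the relevant bound of our range
// we simply echo it back; otherwise we counter with that bound, and if even that bound would
// not be progress over our last proposal we give up and error.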
4800 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4801 if msg.fee_satoshis > last_fee {
4802 if msg.fee_satoshis < our_max_fee {
4803 propose_fee!(msg.fee_satoshis);
4804 } else if last_fee < our_max_fee {
4805 propose_fee!(our_max_fee);
4807 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4810 if msg.fee_satoshis > our_min_fee {
4811 propose_fee!(msg.fee_satoshis);
4812 } else if last_fee > our_min_fee {
4813 propose_fee!(our_min_fee);
4815 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4819 if msg.fee_satoshis < our_min_fee {
4820 propose_fee!(our_min_fee);
4821 } else if msg.fee_satoshis > our_max_fee {
4822 propose_fee!(our_max_fee);
4824 propose_fee!(msg.fee_satoshis);
4830 fn internal_htlc_satisfies_config(
4831 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4832 ) -> Result<(), (&'static str, u16)> {
4833 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4834 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
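// Purely illustrative numbers (not this channel's actual config): with
// forwarding_fee_proportional_millionths = 100 and forwarding_fee_base_msat = 1_000,
// forwarding amt_to_forward = 1_000_000 msat costs 1_000_000 * 100 / 1_000_000 + 1_000 =
// 1_100 msat, so the inbound HTLC must carry at least 1_001_100 msat. The check further below
// similarly requires the inbound cltv_expiry to exceed the outbound value by at least
// cltv_expiry_delta blocks.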
4835 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4836 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4838 "Prior hop has deviated from specified fee parameters or origin node has obsolete ones",
4839 0x1000 | 12, // fee_insufficient
4842 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4844 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4845 0x1000 | 13, // incorrect_cltv_expiry
4851 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4852 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4853 /// unsuccessful, falls back to the previous one if one exists.
4854 pub fn htlc_satisfies_config(
4855 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4856 ) -> Result<(), (&'static str, u16)> {
4857 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4859 if let Some(prev_config) = self.context.prev_config() {
4860 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4867 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4868 self.context.cur_holder_commitment_transaction_number + 1
4871 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4872 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4875 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4876 self.context.cur_counterparty_commitment_transaction_number + 2
4880 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4881 &self.context.holder_signer
4885 pub fn get_value_stat(&self) -> ChannelValueStat {
4887 value_to_self_msat: self.context.value_to_self_msat,
4888 channel_value_msat: self.context.channel_value_satoshis * 1000,
4889 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4890 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4891 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4892 holding_cell_outbound_amount_msat: {
4894 for h in self.context.holding_cell_htlc_updates.iter() {
4896 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4904 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4905 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4909 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4910 /// Allowed in any state (including after shutdown)
4911 pub fn is_awaiting_monitor_update(&self) -> bool {
4912 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4915 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4916 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4917 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
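// Monitor update IDs are assigned sequentially, so the most recent update we actually
// released is the one immediately preceding the first still-blocked update.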
4918 self.context.blocked_monitor_updates[0].update.update_id - 1
4921 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4922 /// further blocked monitor update exists after the next.
4923 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4924 if self.context.blocked_monitor_updates.is_empty() { return None; }
4925 Some((self.context.blocked_monitor_updates.remove(0).update,
4926 !self.context.blocked_monitor_updates.is_empty()))
4929 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4930 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4931 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4932 -> Option<ChannelMonitorUpdate> {
4933 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4934 if !release_monitor {
4935 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4944 pub fn blocked_monitor_updates_pending(&self) -> usize {
4945 self.context.blocked_monitor_updates.len()
4948 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4949 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4950 /// transaction. If the channel is inbound, this implies simply that the channel has not yet advanced its state.
4952 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4953 if !self.is_awaiting_monitor_update() { return false; }
4954 if self.context.channel_state &
4955 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4956 == ChannelState::FundingSent as u32 {
4957 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4958 // FundingSent set, though our peer could have sent their channel_ready.
4959 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4962 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4963 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4964 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4965 // waiting for the initial monitor persistence. Thus, we check if our commitment
4966 // transaction numbers have both been iterated only exactly once (for the
4967 // funding_signed), and we're awaiting monitor update.
4969 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4970 // only way to get an awaiting-monitor-update state during initial funding is if the
4971 // initial monitor persistence is still pending).
4973 // Because deciding we're awaiting initial broadcast spuriously could result in
4974 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4975 // we hard-assert here, even in production builds.
4976 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4977 assert!(self.context.monitor_pending_channel_ready);
4978 assert_eq!(self.context.latest_monitor_update_id, 0);
4984 /// Returns true if our channel_ready has been sent
4985 pub fn is_our_channel_ready(&self) -> bool {
4986 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4989 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4990 pub fn received_shutdown(&self) -> bool {
4991 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4994 /// Returns true if we either initiated or agreed to shut down the channel.
4995 pub fn sent_shutdown(&self) -> bool {
4996 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
4999 /// Returns true if this channel is fully shut down. True here implies that no further actions
5000 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5001 /// will be handled appropriately by the chain monitor.
5002 pub fn is_shutdown(&self) -> bool {
5003 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
5004 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
5009 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5010 self.context.channel_update_status
5013 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5014 self.context.update_time_counter += 1;
5015 self.context.channel_update_status = status;
5018 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5020 // Called: always when a new block/transactions are confirmed with the new height, and
5021 // when funding is signed with a height of 0.
5022 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5026 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
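// Confirmations are inclusive of the confirming block: a funding transaction confirmed at the
// current tip (height == funding_tx_confirmation_height) has exactly one confirmation.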
5027 if funding_tx_confirmations <= 0 {
5028 self.context.funding_tx_confirmation_height = 0;
5031 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5035 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5036 // channel_ready yet.
5037 if self.context.signer_pending_funding {
5041 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5042 // channel_ready until the entire batch is ready.
5043 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5044 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5045 self.context.channel_state |= ChannelState::OurChannelReady as u32;
5047 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5048 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5049 self.context.update_time_counter += 1;
5051 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5052 // We got a reorg but not enough to trigger a force close, just ignore.
5055 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
5056 // We should never see a funding transaction on-chain until we've received
5057 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5058 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5059 // however, may do this and we shouldn't treat it as a bug.
5060 #[cfg(not(fuzzing))]
5061 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5062 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5063 self.context.channel_state);
5065 // We got a reorg but not enough to trigger a force close, just ignore.
5069 if need_commitment_update {
5070 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5071 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5072 let next_per_commitment_point =
5073 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5074 return Some(msgs::ChannelReady {
5075 channel_id: self.context.channel_id,
5076 next_per_commitment_point,
5077 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5081 self.context.monitor_pending_channel_ready = true;
5087 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5088 /// In the first case, we store the confirmation height and calculate the short channel id.
5089 /// In the second, we simply return an Err indicating we need to be force-closed now.
5090 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5091 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5092 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5093 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5095 NS::Target: NodeSigner,
5098 let mut msgs = (None, None);
5099 if let Some(funding_txo) = self.context.get_funding_txo() {
5100 for &(index_in_block, tx) in txdata.iter() {
5101 // Check if the transaction is the expected funding transaction, and if it is,
5102 // check that it pays the right amount to the right script.
5103 if self.context.funding_tx_confirmation_height == 0 {
5104 if tx.txid() == funding_txo.txid {
5105 let txo_idx = funding_txo.index as usize;
5106 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5107 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5108 if self.context.is_outbound() {
5109 // If we generated the funding transaction and it doesn't match what it
5110 // should, the client is really broken and we should just panic and
5111 // tell them off. That said, because hash collisions happen with high
5112 // probability in fuzzing mode, if we're fuzzing we just close the
5113 // channel and move on.
5114 #[cfg(not(fuzzing))]
5115 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5117 self.context.update_time_counter += 1;
5118 let err_reason = "funding tx had wrong script/value or output index";
5119 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5121 if self.context.is_outbound() {
5122 if !tx.is_coin_base() {
5123 for input in tx.input.iter() {
5124 if input.witness.is_empty() {
5125 // We generated a malleable funding transaction, implying we've
5126 // just exposed ourselves to funds loss to our counterparty.
5127 #[cfg(not(fuzzing))]
5128 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5133 self.context.funding_tx_confirmation_height = height;
5134 self.context.funding_tx_confirmed_in = Some(*block_hash);
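// A short channel id packs the confirmation block height, the transaction's index within that
// block, and the funding output index into a single u64 (see BOLT 7), which is where the
// bounds enforced by scid_from_parts below come from.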
5135 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5136 Ok(scid) => Some(scid),
5137 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5140 // If this is a coinbase transaction and not a 0-conf channel
5141 // we should update our min_depth to 100 to handle coinbase maturity
5142 if tx.is_coin_base() &&
5143 self.context.minimum_depth.unwrap_or(0) > 0 &&
5144 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5145 self.context.minimum_depth = Some(COINBASE_MATURITY);
5148 // If we allow 1-conf funding, we may need to check for channel_ready here and
5149 // send it immediately instead of waiting for a best_block_updated call (which
5150 // may have already happened for this block).
5151 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5152 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5153 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5154 msgs = (Some(channel_ready), announcement_sigs);
5157 for inp in tx.input.iter() {
5158 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5159 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5160 return Err(ClosureReason::CommitmentTxConfirmed);
5168 /// When a new block is connected, we check the height of the block against outbound holding
5169 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5170 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5171 /// handled by the ChannelMonitor.
5173 /// If we return Err, the channel may have been closed, at which point the standard
5174 /// requirements apply - no calls may be made except those explicitly stated to be allowed after shutdown.
5177 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed back.
5179 pub fn best_block_updated<NS: Deref, L: Deref>(
5180 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5181 node_signer: &NS, user_config: &UserConfig, logger: &L
5182 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5184 NS::Target: NodeSigner,
5187 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5190 fn do_best_block_updated<NS: Deref, L: Deref>(
5191 &mut self, height: u32, highest_header_time: u32,
5192 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5193 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5195 NS::Target: NodeSigner,
5198 let mut timed_out_htlcs = Vec::new();
5199 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5200 // forward an HTLC when our counterparty should almost certainly just fail it for expiring too soon.
5202 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5203 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5205 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5206 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5207 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5215 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5217 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5218 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5219 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5221 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5222 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5225 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5226 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5227 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5228 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5229 if self.context.funding_tx_confirmation_height == 0 {
5230 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5231 // zero if it has been reorged out, however in either case, our state flags
5232 // indicate we've already sent a channel_ready
5233 funding_tx_confirmations = 0;
5236 // If we've sent channel_ready (or have both sent and received channel_ready), and
5237 // the funding transaction has become unconfirmed,
5238 // close the channel and hope we can get the latest state on chain (because presumably
5239 // the funding transaction is at least still in the mempool of most nodes).
5241 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5242 // 0-conf channel, but not doing so may lead to the
5243 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have to force-close here.
5245 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5246 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5247 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5248 return Err(ClosureReason::ProcessingError { err: err_reason });
5250 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5251 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5252 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5253 // If funding_tx_confirmed_in is unset, the channel must not be active
5254 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5255 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5256 return Err(ClosureReason::FundingTimedOut);
5259 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5260 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5262 Ok((None, timed_out_htlcs, announcement_sigs))
5265 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5266 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5267 /// before the channel has reached channel_ready and we can just wait for more blocks.
5268 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5269 if self.context.funding_tx_confirmation_height != 0 {
5270 // We handle the funding disconnection by calling best_block_updated with a height one
5271 // below where our funding was connected, implying a reorg back to conf_height - 1.
5272 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5273 // We use the time field to bump the current time we set on channel updates if it's
5274 // larger. If we don't know that time has moved forward, we can just set it to the last
5275 // time we saw and it will be ignored.
5276 let best_time = self.context.update_time_counter;
5277 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5278 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5279 assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
5280 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5281 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5287 // We never learned about the funding confirmation anyway, just ignore
5292 // Methods to get unprompted messages to send to the remote end (or where we already returned
5293 // something in the handler for the message that prompted this message):
5295 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5296 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5297 /// directions). Should be used for both broadcasted announcements and in response to an
5298 /// AnnouncementSignatures message from the remote peer.
5300 /// Will only fail if we're not in a state where channel_announcement may be sent (including closing).
5303 /// This will only return ChannelError::Ignore upon failure.
5305 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5306 fn get_channel_announcement<NS: Deref>(
5307 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5308 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5309 if !self.context.config.announced_channel {
5310 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5312 if !self.context.is_usable() {
5313 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5316 let short_channel_id = self.context.get_short_channel_id()
5317 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5318 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5319 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5320 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5321 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5323 let msg = msgs::UnsignedChannelAnnouncement {
5324 features: channelmanager::provided_channel_features(&user_config),
5327 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5328 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5329 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5330 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5331 excess_data: Vec::new(),
5337 fn get_announcement_sigs<NS: Deref, L: Deref>(
5338 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5339 best_block_height: u32, logger: &L
5340 ) -> Option<msgs::AnnouncementSignatures>
5342 NS::Target: NodeSigner,
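// Announcement signatures are only produced once the funding transaction has at least six
// confirmations (i.e. funding_tx_confirmation_height + 5 <= best_block_height), matching the
// BOLT 7 requirement before a channel may be announced.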
5345 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5349 if !self.context.is_usable() {
5353 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5354 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5358 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5362 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5363 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5366 log_trace!(logger, "{:?}", e);
5370 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5372 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5377 match &self.context.holder_signer {
5378 ChannelSignerType::Ecdsa(ecdsa) => {
5379 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5381 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5386 let short_channel_id = match self.context.get_short_channel_id() {
5388 None => return None,
5391 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5393 Some(msgs::AnnouncementSignatures {
5394 channel_id: self.context.channel_id(),
5396 node_signature: our_node_sig,
5397 bitcoin_signature: our_bitcoin_sig,
5400 // TODO (taproot|arik)
5406 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
5408 fn sign_channel_announcement<NS: Deref>(
5409 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5410 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5411 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5412 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5413 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5414 let were_node_one = announcement.node_id_1 == our_node_key;
5416 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5417 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5418 match &self.context.holder_signer {
5419 ChannelSignerType::Ecdsa(ecdsa) => {
5420 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5421 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5422 Ok(msgs::ChannelAnnouncement {
5423 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5424 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5425 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5426 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5427 contents: announcement,
5430 // TODO (taproot|arik)
5435 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5439 /// Processes an incoming announcement_signatures message, providing a fully-signed
5440 /// channel_announcement message which we can broadcast and storing our counterparty's
5441 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5442 pub fn announcement_signatures<NS: Deref>(
5443 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5444 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5445 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5446 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5448 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
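// Per BOLT 7, announcement signatures are made over the double-SHA256 of the serialized
// announcement contents (everything following the signature fields), which is the hash we
// verify the counterparty's two signatures against below.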
5450 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5451 return Err(ChannelError::Close(format!(
5452 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5453 &announcement, self.context.get_counterparty_node_id())));
5455 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5456 return Err(ChannelError::Close(format!(
5457 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5458 &announcement, self.context.counterparty_funding_pubkey())));
5461 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5462 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5463 return Err(ChannelError::Ignore(
5464 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5467 self.sign_channel_announcement(node_signer, announcement)
5470 /// Gets a signed channel_announcement for this channel, if we previously received an
5471 /// announcement_signatures from our counterparty.
5472 pub fn get_signed_channel_announcement<NS: Deref>(
5473 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5474 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5475 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5478 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5480 Err(_) => return None,
5482 match self.sign_channel_announcement(node_signer, announcement) {
5483 Ok(res) => Some(res),
5488 /// May panic if called on a channel that wasn't immediately-previously
5489 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5490 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5491 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5492 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5493 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5494 // current to_remote balances. However, it no longer has any use, and thus is now simply
5495 // set to a dummy (but valid, as required by the spec) public key.
5496 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5497 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5498 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5499 let mut pk = [2; 33]; pk[1] = 0xff;
5500 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5501 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5502 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5503 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5506 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5509 self.mark_awaiting_response();
5510 msgs::ChannelReestablish {
5511 channel_id: self.context.channel_id(),
5512 // The protocol has two different commitment number concepts - the "commitment
5513 // transaction number", which starts from 0 and counts up, and the "revocation key
5514 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5515 // commitment transaction numbers by the index which will be used to reveal the
5516 // revocation key for that commitment transaction, which means we have to convert them
5517 // to protocol-level commitment numbers here...
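// (The transaction number counts down from INITIAL_COMMITMENT_NUMBER while the protocol-level
// commitment number counts up from 0, so a holder transaction number of
// INITIAL_COMMITMENT_NUMBER - n corresponds to protocol-level commitment number n.)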
5519 // next_local_commitment_number is the next commitment_signed number we expect to
5520 // receive (indicating if they need to resend one that we missed).
5521 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5522 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5523 // receive, however we track it by the next commitment number for a remote transaction
5524 // (which is one further, as they always revoke previous commitment transaction, not
5525 // the one we send) so we have to decrement by 1. Note that if
5526 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5527 // dropped this channel on disconnect as it hasn't yet reached FundingSent, so the subtraction below can't underflow.
5529 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5530 your_last_per_commitment_secret: remote_last_secret,
5531 my_current_per_commitment_point: dummy_pubkey,
5532 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5533 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5534 // txid of that interactive transaction, else we MUST NOT set it.
5535 next_funding_txid: None,
5540 // Send stuff to our remote peers:
5542 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5543 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5544 /// commitment update.
5546 /// `Err`s will only be [`ChannelError::Ignore`].
5547 pub fn queue_add_htlc<F: Deref, L: Deref>(
5548 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5549 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5550 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5551 ) -> Result<(), ChannelError>
5552 where F::Target: FeeEstimator, L::Target: Logger
5555 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5556 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5557 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5559 if let ChannelError::Ignore(_) = err { /* fine */ }
5560 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5565 /// Adds a pending outbound HTLC to this channel. Note that you probably want
5566 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
5568 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on the wire:
5570 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5571 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates in flight.
5573 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5574 /// we may not yet have sent the previous commitment update messages and will need to
5575 /// regenerate them.
5577 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5578 /// on this [`Channel`] if `force_holding_cell` is false.
5580 /// `Err`s will only be [`ChannelError::Ignore`].
5581 fn send_htlc<F: Deref, L: Deref>(
5582 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5583 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5584 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5585 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5586 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5587 where F::Target: FeeEstimator, L::Target: Logger
5589 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5590 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5592 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5593 if amount_msat > channel_total_msat {
5594 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5597 if amount_msat == 0 {
5598 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5601 let available_balances = self.context.get_available_balances(fee_estimator);
5602 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5603 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5604 available_balances.next_outbound_htlc_minimum_msat)));
5607 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5608 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5609 available_balances.next_outbound_htlc_limit_msat)));
5612 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5613 // Note that this should never really happen: receiving an incoming HTLC for relay while
5614 // !is_live() will result in us rejecting the HTLC, and we won't allow
5615 // the user to send directly into a !is_live() channel. However, if we
5616 // disconnected during the time the previous hop was doing the commitment dance we may
5617 // end up getting here after the forwarding delay. In any case, returning an
5618 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5619 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5622 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5623 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5624 payment_hash, amount_msat,
5625 if force_holding_cell { "into holding cell" }
5626 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5627 else { "to peer" });
5629 if need_holding_cell {
5630 force_holding_cell = true;
5633 // Now update local state:
5634 if force_holding_cell {
5635 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5640 onion_routing_packet,
5647 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5648 htlc_id: self.context.next_holder_htlc_id,
5650 payment_hash: payment_hash.clone(),
5652 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5658 let res = msgs::UpdateAddHTLC {
5659 channel_id: self.context.channel_id,
5660 htlc_id: self.context.next_holder_htlc_id,
5664 onion_routing_packet,
5668 self.context.next_holder_htlc_id += 1;
5673 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5674 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5675 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
5676 // fail to generate this commitment, we are still at a position where upgrading their status is acceptable.
5678 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5679 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5680 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5682 if let Some(state) = new_state {
5683 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5687 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5688 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5689 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5690 // Grab the preimage, if it exists, instead of cloning
5691 let mut reason = OutboundHTLCOutcome::Success(None);
5692 mem::swap(outcome, &mut reason);
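// (The swap moves the real outcome into `reason` and leaves the placeholder behind, letting us
// take ownership of any preimage without cloning it.)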
5693 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5696 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5697 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5698 debug_assert!(!self.context.is_outbound());
5699 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5700 self.context.feerate_per_kw = feerate;
5701 self.context.pending_update_fee = None;
5704 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5706 let (mut htlcs_ref, counterparty_commitment_tx) =
5707 self.build_commitment_no_state_update(logger);
5708 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5709 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5710 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5712 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5713 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5716 self.context.latest_monitor_update_id += 1;
5717 let monitor_update = ChannelMonitorUpdate {
5718 update_id: self.context.latest_monitor_update_id,
5719 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5720 commitment_txid: counterparty_commitment_txid,
5721 htlc_outputs: htlcs.clone(),
5722 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5723 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5724 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5725 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5726 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5729 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5733 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5734 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5735 where L::Target: Logger
5737 let counterparty_keys = self.context.build_remote_transaction_keys();
5738 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5739 let counterparty_commitment_tx = commitment_stats.tx;
5741 #[cfg(any(test, fuzzing))]
5743 if !self.context.is_outbound() {
5744 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5745 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5746 if let Some(info) = projected_commit_tx_info {
5747 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5748 if info.total_pending_htlcs == total_pending_htlcs
5749 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5750 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5751 && info.feerate == self.context.feerate_per_kw {
5752 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5753 assert_eq!(actual_fee, info.fee);
5759 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5762 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5763 /// generation when we shouldn't change HTLC/channel state.
5764 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5765 // Get the fee tests from `build_commitment_no_state_update`
5766 #[cfg(any(test, fuzzing))]
5767 self.build_commitment_no_state_update(logger);
5769 let counterparty_keys = self.context.build_remote_transaction_keys();
5770 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5771 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5773 match &self.context.holder_signer {
5774 ChannelSignerType::Ecdsa(ecdsa) => {
5775 let (signature, htlc_signatures);
5778 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5779 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5783 let res = ecdsa.sign_counterparty_commitment(&commitment_stats.tx, commitment_stats.preimages, &self.context.secp_ctx)
5784 .map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5786 htlc_signatures = res.1;
5788 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5789 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5790 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5791 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5793 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5794 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5795 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5796 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5797 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5798 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5802 Ok((msgs::CommitmentSigned {
5803 channel_id: self.context.channel_id,
5807 partial_signature_with_nonce: None,
5808 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5810 // TODO (taproot|arik)
5816 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5817 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5819 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5820 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5821 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5822 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5823 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5824 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5825 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5826 where F::Target: FeeEstimator, L::Target: Logger
5828 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5829 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
5830 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5833 let monitor_update = self.build_commitment_no_status_check(logger);
5834 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5835 Ok(self.push_ret_blockable_mon_update(monitor_update))
5841 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually occurred.
5843 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5844 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5845 fee_base_msat: msg.contents.fee_base_msat,
5846 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5847 cltv_expiry_delta: msg.contents.cltv_expiry_delta
5849 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5851 self.context.counterparty_forwarding_info = new_forwarding_info;
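// Illustrative sketch (assumption, not from the source): the returned bool lets a caller
// decide whether anything derived from the counterparty's forwarding parameters needs to
// be refreshed; `chan` and `msg` are hypothetical.
//
// if chan.channel_update(&msg)? {
// 	// counterparty_forwarding_info changed; refresh any state derived from it
// }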
5857 /// Begins the shutdown process, getting a message for the remote peer and returning all
5858 /// holding cell HTLCs for payment failure.
5860 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5861 /// [`ChannelMonitorUpdate`] will be returned.
5862 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5863 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5864 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
5866 for htlc in self.context.pending_outbound_htlcs.iter() {
5867 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5868 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5871 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5872 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5873 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5875 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5876 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5879 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5880 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5882 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5883 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5884 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5887 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5888 // script is set; we just force-close and call it a day.
5889 let mut chan_closed = false;
5890 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5894 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5896 None if !chan_closed => {
5897 // use override shutdown script if provided
5898 let shutdown_scriptpubkey = match override_shutdown_script {
5899 Some(script) => script,
5901 // otherwise, use the shutdown scriptpubkey provided by the signer
5902 match signer_provider.get_shutdown_scriptpubkey() {
5903 Ok(scriptpubkey) => scriptpubkey,
5904 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5908 if !shutdown_scriptpubkey.is_compatible(their_features) {
5909 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5911 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5917 // From here on out, we may not fail!
5918 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
5919 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5920 let shutdown_result = ShutdownResult {
5921 monitor_update: None,
5922 dropped_outbound_htlcs: Vec::new(),
5923 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5924 channel_id: self.context.channel_id,
5925 counterparty_node_id: self.context.counterparty_node_id,
5927 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5928 Some(shutdown_result)
5930 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5933 self.context.update_time_counter += 1;
5935 let monitor_update = if update_shutdown_script {
5936 self.context.latest_monitor_update_id += 1;
5937 let monitor_update = ChannelMonitorUpdate {
5938 update_id: self.context.latest_monitor_update_id,
5939 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5940 scriptpubkey: self.get_closing_scriptpubkey(),
5943 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5944 self.push_ret_blockable_mon_update(monitor_update)
5946 let shutdown = msgs::Shutdown {
5947 channel_id: self.context.channel_id,
5948 scriptpubkey: self.get_closing_scriptpubkey(),
5951 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5952 // our shutdown until we've committed all of the pending changes.
5953 self.context.holding_cell_update_fee = None;
5954 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5955 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5957 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5958 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5965 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5966 "we can't both complete shutdown and return a monitor update");
5968 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
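// Hedged usage sketch (caller names are hypothetical): initiating a cooperative close means
// failing back the returned holding-cell HTLCs and, if present, persisting the monitor
// update carrying the shutdown script before sending the `shutdown` message.
//
// let (shutdown_msg, monitor_update_opt, dropped_htlcs, shutdown_result_opt) =
// 	chan.get_shutdown(&signer_provider, &their_features, None, None)?;
// for (source, payment_hash) in dropped_htlcs {
// 	// fail these payments back to their origin
// }
// // persist `monitor_update_opt` (if Some), then send `shutdown_msg` to the peer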
5971 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
5972 self.context.holding_cell_htlc_updates.iter()
5973 .flat_map(|htlc_update| {
5975 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
5976 => Some((source, payment_hash)),
5980 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
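// For example (hypothetical caller): counting in-flight HTLCs for a given payment hash,
// covering both holding-cell adds and already-pending outbound HTLCs.
//
// let n = chan.inflight_htlc_sources()
// 	.filter(|(_, hash)| **hash == target_payment_hash)
// 	.count();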
5984 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5985 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
5986 pub context: ChannelContext<SP>,
5987 pub unfunded_context: UnfundedChannelContext,
5990 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
5991 pub fn new<ES: Deref, F: Deref>(
5992 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
5993 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
5994 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
5995 ) -> Result<OutboundV1Channel<SP>, APIError>
5996 where ES::Target: EntropySource,
5997 F::Target: FeeEstimator
5999 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6000 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6001 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6002 let pubkeys = holder_signer.pubkeys().clone();
6004 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6005 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6007 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6008 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6010 let channel_value_msat = channel_value_satoshis * 1000;
6011 if push_msat > channel_value_msat {
6012 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6014 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6015 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6017 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6018 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6019 // Protocol-level safety check; this should never trip because of
6020 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6021 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6024 let channel_type = Self::get_initial_channel_type(&config, their_features);
6025 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6027 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6028 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6030 (ConfirmationTarget::NonAnchorChannelFee, 0)
6032 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6034 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6035 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6036 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6037 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6040 let mut secp_ctx = Secp256k1::new();
6041 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6043 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6044 match signer_provider.get_shutdown_scriptpubkey() {
6045 Ok(scriptpubkey) => Some(scriptpubkey),
6046 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6050 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6051 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6052 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6056 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6057 Ok(script) => script,
6058 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6061 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6064 context: ChannelContext {
6067 config: LegacyChannelConfig {
6068 options: config.channel_config.clone(),
6069 announced_channel: config.channel_handshake_config.announced_channel,
6070 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6075 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6077 channel_id: temporary_channel_id,
6078 temporary_channel_id: Some(temporary_channel_id),
6079 channel_state: ChannelState::OurInitSent as u32,
6080 announcement_sigs_state: AnnouncementSigsState::NotSent,
6082 channel_value_satoshis,
6084 latest_monitor_update_id: 0,
6086 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6087 shutdown_scriptpubkey,
6090 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6091 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6094 pending_inbound_htlcs: Vec::new(),
6095 pending_outbound_htlcs: Vec::new(),
6096 holding_cell_htlc_updates: Vec::new(),
6097 pending_update_fee: None,
6098 holding_cell_update_fee: None,
6099 next_holder_htlc_id: 0,
6100 next_counterparty_htlc_id: 0,
6101 update_time_counter: 1,
6103 resend_order: RAACommitmentOrder::CommitmentFirst,
6105 monitor_pending_channel_ready: false,
6106 monitor_pending_revoke_and_ack: false,
6107 monitor_pending_commitment_signed: false,
6108 monitor_pending_forwards: Vec::new(),
6109 monitor_pending_failures: Vec::new(),
6110 monitor_pending_finalized_fulfills: Vec::new(),
6112 signer_pending_commitment_update: false,
6113 signer_pending_funding: false,
6115 #[cfg(debug_assertions)]
6116 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6117 #[cfg(debug_assertions)]
6118 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6120 last_sent_closing_fee: None,
6121 pending_counterparty_closing_signed: None,
6122 expecting_peer_commitment_signed: false,
6123 closing_fee_limits: None,
6124 target_closing_feerate_sats_per_kw: None,
6126 funding_tx_confirmed_in: None,
6127 funding_tx_confirmation_height: 0,
6128 short_channel_id: None,
6129 channel_creation_height: current_chain_height,
6131 feerate_per_kw: commitment_feerate,
6132 counterparty_dust_limit_satoshis: 0,
6133 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6134 counterparty_max_htlc_value_in_flight_msat: 0,
6135 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6136 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6137 holder_selected_channel_reserve_satoshis,
6138 counterparty_htlc_minimum_msat: 0,
6139 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6140 counterparty_max_accepted_htlcs: 0,
6141 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6142 minimum_depth: None, // Filled in in accept_channel
6144 counterparty_forwarding_info: None,
6146 channel_transaction_parameters: ChannelTransactionParameters {
6147 holder_pubkeys: pubkeys,
6148 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6149 is_outbound_from_holder: true,
6150 counterparty_parameters: None,
6151 funding_outpoint: None,
6152 channel_type_features: channel_type.clone()
6154 funding_transaction: None,
6155 is_batch_funding: None,
6157 counterparty_cur_commitment_point: None,
6158 counterparty_prev_commitment_point: None,
6159 counterparty_node_id,
6161 counterparty_shutdown_scriptpubkey: None,
6163 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6165 channel_update_status: ChannelUpdateStatus::Enabled,
6166 closing_signed_in_flight: false,
6168 announcement_sigs: None,
6170 #[cfg(any(test, fuzzing))]
6171 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6172 #[cfg(any(test, fuzzing))]
6173 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6175 workaround_lnd_bug_4006: None,
6176 sent_message_awaiting_response: None,
6178 latest_inbound_scid_alias: None,
6179 outbound_scid_alias,
6181 channel_pending_event_emitted: false,
6182 channel_ready_event_emitted: false,
6184 #[cfg(any(test, fuzzing))]
6185 historical_inbound_htlc_fulfills: HashSet::new(),
6190 blocked_monitor_updates: Vec::new(),
6192 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
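// Hedged usage sketch (values and variable names are assumptions): opening a 1,000,000 sat
// channel with nothing pushed to the counterparty, then producing the `open_channel`
// message for the handshake.
//
// let chan = OutboundV1Channel::new(
// 	&fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
// 	&their_features, 1_000_000 /* channel_value_satoshis */, 0 /* push_msat */,
// 	42 /* user_id */, &config, best_block_height, outbound_scid_alias, None,
// )?;
// let open_channel_msg = chan.get_open_channel(chain_hash);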
6196 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6197 /// a funding_created message for the remote peer.
6198 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6199 /// or if called on an inbound channel.
6200 /// Note that channel_id changes during this call!
6201 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6202 /// If an Err is returned, it is a ChannelError::Close.
6203 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6204 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6205 if !self.context.is_outbound() {
6206 panic!("Tried to create outbound funding_created message on an inbound channel!");
6208 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6209 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6211 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6212 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6213 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6214 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6217 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6218 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6220 // Now that we're past error-generating stuff, update our local state:
6222 self.context.channel_state = ChannelState::FundingCreated as u32;
6223 self.context.channel_id = funding_txo.to_channel_id();
6225 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6226 // We can skip this if it is a zero-conf channel.
6227 if funding_transaction.is_coin_base() &&
6228 self.context.minimum_depth.unwrap_or(0) > 0 &&
6229 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6230 self.context.minimum_depth = Some(COINBASE_MATURITY);
6233 self.context.funding_transaction = Some(funding_transaction);
6234 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6236 let funding_created = self.context.get_funding_created_msg(logger);
6237 if funding_created.is_none() {
6238 if !self.context.signer_pending_funding {
6239 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6240 self.context.signer_pending_funding = true;
6244 let channel = Channel {
6245 context: self.context,
6248 Ok((channel, funding_created))
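// Hedged sketch of the follow-up step (hypothetical caller code): once the funding
// transaction and outpoint are known, the channel is promoted and a `funding_created`
// message may be produced (or deferred while the signer is pending).
//
// let (chan, funding_created_opt) = outbound_chan
// 	.get_funding_created(funding_tx, funding_txo, false /* is_batch_funding */, &logger)
// 	.map_err(|(_chan, err)| err)?;
// // Send `funding_created_opt` if it is Some, but do NOT broadcast `funding_tx` until a
// // successful `funding_signed` exchange, as the doc comment above warns.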
6251 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6252 // The default channel type (i.e. the first one we try) depends on whether the channel is
6253 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6254 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6255 // with no other changes, and fall back to `only_static_remotekey`.
6256 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6257 if !config.channel_handshake_config.announced_channel &&
6258 config.channel_handshake_config.negotiate_scid_privacy &&
6259 their_features.supports_scid_privacy() {
6260 ret.set_scid_privacy_required();
6263 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6264 // set it now. If they don't understand it, we'll fall back to our default of
6265 // `only_static_remotekey`.
6266 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6267 their_features.supports_anchors_zero_fee_htlc_tx() {
6268 ret.set_anchors_zero_fee_htlc_tx_required();
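// Worked example (assumed config/peer, for illustration only): with `announced_channel`
// false, both `negotiate_scid_privacy` and `negotiate_anchors_zero_fee_htlc_tx` enabled,
// and a peer advertising both features, the initial proposal would be:
//
// let mut expected = ChannelTypeFeatures::only_static_remote_key();
// expected.set_scid_privacy_required();
// expected.set_anchors_zero_fee_htlc_tx_required();
// assert!(Self::get_initial_channel_type(&config, &their_features) == expected);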
6274 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6275 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6276 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6277 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6278 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6279 ) -> Result<msgs::OpenChannel, ()>
6281 F::Target: FeeEstimator
6283 if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
6284 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6285 // We've exhausted our options
6288 // We support opening a few different types of channels. Try removing our additional
6289 // features one by one until we've either arrived at our default or the counterparty has accepted one.
6292 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6293 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6294 // checks whether the counterparty supports every feature, this would only happen if the
6295 // counterparty is advertising the feature, but rejecting channels proposing the feature for whatever reason.
6297 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6298 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6299 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6300 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6301 } else if self.context.channel_type.supports_scid_privacy() {
6302 self.context.channel_type.clear_scid_privacy();
6304 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6306 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6307 Ok(self.get_open_channel(chain_hash))
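// Illustrative sketch (assumption): on receiving an error during the handshake, retry with
// a less-featureful channel type before giving up on the channel entirely.
//
// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
// 	Ok(downgraded_open_channel) => { /* re-send this open_channel to the peer */ },
// 	Err(()) => { /* nothing simpler left to try; fail the channel */ },
// }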
6310 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6311 if !self.context.is_outbound() {
6312 panic!("Tried to open a channel for an inbound channel?");
6314 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6315 panic!("Cannot generate an open_channel after we've moved forward");
6318 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6319 panic!("Tried to send an open_channel for a channel that has already advanced");
6322 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6323 let keys = self.context.get_holder_pubkeys();
6327 temporary_channel_id: self.context.channel_id,
6328 funding_satoshis: self.context.channel_value_satoshis,
6329 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6330 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6331 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6332 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6333 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6334 feerate_per_kw: self.context.feerate_per_kw as u32,
6335 to_self_delay: self.context.get_holder_selected_contest_delay(),
6336 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6337 funding_pubkey: keys.funding_pubkey,
6338 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6339 payment_point: keys.payment_point,
6340 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6341 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6342 first_per_commitment_point,
6343 channel_flags: if self.context.config.announced_channel {1} else {0},
6344 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6345 Some(script) => script.clone().into_inner(),
6346 None => Builder::new().into_script(),
6348 channel_type: Some(self.context.channel_type.clone()),
6353 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6354 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6356 // Check sanity of message fields:
6357 if !self.context.is_outbound() {
6358 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6360 if self.context.channel_state != ChannelState::OurInitSent as u32 {
6361 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6363 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6364 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6366 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6367 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6369 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6370 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6372 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6373 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6374 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6376 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6377 if msg.htlc_minimum_msat >= full_channel_value_msat {
6378 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6380 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6381 if msg.to_self_delay > max_delay_acceptable {
6382 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6384 if msg.max_accepted_htlcs < 1 {
6385 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6387 if msg.max_accepted_htlcs > MAX_HTLCS {
6388 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6391 // Now check against optional parameters as set by config...
6392 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6393 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6395 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6396 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6398 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6399 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6401 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6402 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6404 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6405 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6407 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6408 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6410 if msg.minimum_depth > peer_limits.max_minimum_depth {
6411 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6414 if let Some(ty) = &msg.channel_type {
6415 if *ty != self.context.channel_type {
6416 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6418 } else if their_features.supports_channel_type() {
6419 // Assume they've accepted the channel type as they said they understand it.
6421 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6422 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6423 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6425 self.context.channel_type = channel_type.clone();
6426 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6429 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6430 match &msg.shutdown_scriptpubkey {
6431 &Some(ref script) => {
6432 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6433 if script.len() == 0 {
6436 if !script::is_bolt2_compliant(&script, their_features) {
6437 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6439 Some(script.clone())
6442 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
6444 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6449 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6450 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6451 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6452 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6453 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6455 if peer_limits.trust_own_funding_0conf {
6456 self.context.minimum_depth = Some(msg.minimum_depth);
6458 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6461 let counterparty_pubkeys = ChannelPublicKeys {
6462 funding_pubkey: msg.funding_pubkey,
6463 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6464 payment_point: msg.payment_point,
6465 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6466 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6469 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6470 selected_contest_delay: msg.to_self_delay,
6471 pubkeys: counterparty_pubkeys,
6474 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6475 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6477 self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
6478 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6484 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6485 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6486 pub context: ChannelContext<SP>,
6487 pub unfunded_context: UnfundedChannelContext,
6490 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6491 /// Creates a new channel from a remote side's request for one.
6492 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6493 pub fn new<ES: Deref, F: Deref, L: Deref>(
6494 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6495 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6496 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6497 current_chain_height: u32, logger: &L, is_0conf: bool,
6498 ) -> Result<InboundV1Channel<SP>, ChannelError>
6499 where ES::Target: EntropySource,
6500 F::Target: FeeEstimator,
6503 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6504 let announced_channel = (msg.channel_flags & 1) == 1;
6506 // First check the channel type is known, failing before we do anything else if we don't
6507 // support this channel type.
6508 let channel_type = if let Some(channel_type) = &msg.channel_type {
6509 if channel_type.supports_any_optional_bits() {
6510 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6513 // We only support the channel types defined by the `ChannelManager` in
6514 // `provided_channel_type_features`. The channel type must always support
6515 // `static_remote_key`.
6516 if !channel_type.requires_static_remote_key() {
6517 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6519 // Make sure we support all of the features behind the channel type.
6520 if !channel_type.is_subset(our_supported_features) {
6521 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6523 if channel_type.requires_scid_privacy() && announced_channel {
6524 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6526 channel_type.clone()
6528 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6529 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6530 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6535 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6536 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6537 let pubkeys = holder_signer.pubkeys().clone();
6538 let counterparty_pubkeys = ChannelPublicKeys {
6539 funding_pubkey: msg.funding_pubkey,
6540 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6541 payment_point: msg.payment_point,
6542 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6543 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6546 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6547 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6550 // Check sanity of message fields:
6551 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6552 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6554 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6555 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6557 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6558 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6560 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6561 if msg.push_msat > full_channel_value_msat {
6562 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6564 if msg.dust_limit_satoshis > msg.funding_satoshis {
6565 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6567 if msg.htlc_minimum_msat >= full_channel_value_msat {
6568 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6570 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6572 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6573 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6574 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6576 if msg.max_accepted_htlcs < 1 {
6577 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6579 if msg.max_accepted_htlcs > MAX_HTLCS {
6580 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6583 // Now check against optional parameters as set by config...
6584 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6585 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6587 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6588 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6590 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6591 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6593 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6594 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6596 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6597 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6599 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6600 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6602 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6603 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6606 // Convert things into internal flags and prep our state:
6608 if config.channel_handshake_limits.force_announced_channel_preference {
6609 if config.channel_handshake_config.announced_channel != announced_channel {
6610 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6614 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6615 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6616 // Protocol-level safety check; this should never trip because of
6617 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6618 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6620 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6621 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6623 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6624 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6625 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6627 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6628 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6631 // check if the funder's amount for the initial commitment tx is sufficient
6632 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6633 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6634 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6638 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6639 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6640 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6641 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6644 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6645 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6646 // want to push much to us), our counterparty should always have more than our reserve.
6647 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6648 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6651 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6652 match &msg.shutdown_scriptpubkey {
6653 &Some(ref script) => {
6654 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6655 if script.len() == 0 {
6658 if !script::is_bolt2_compliant(&script, their_features) {
6659 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6661 Some(script.clone())
6664 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
6666 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6671 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6672 match signer_provider.get_shutdown_scriptpubkey() {
6673 Ok(scriptpubkey) => Some(scriptpubkey),
6674 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6678 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6679 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6680 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6684 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6685 Ok(script) => script,
6686 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6689 let mut secp_ctx = Secp256k1::new();
6690 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6692 let minimum_depth = if is_0conf {
6695 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6699 context: ChannelContext {
6702 config: LegacyChannelConfig {
6703 options: config.channel_config.clone(),
6705 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6710 inbound_handshake_limits_override: None,
6712 temporary_channel_id: Some(msg.temporary_channel_id),
6713 channel_id: msg.temporary_channel_id,
6714 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6715 announcement_sigs_state: AnnouncementSigsState::NotSent,
6718 latest_monitor_update_id: 0,
6720 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6721 shutdown_scriptpubkey,
6724 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6725 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6726 value_to_self_msat: msg.push_msat,
6728 pending_inbound_htlcs: Vec::new(),
6729 pending_outbound_htlcs: Vec::new(),
6730 holding_cell_htlc_updates: Vec::new(),
6731 pending_update_fee: None,
6732 holding_cell_update_fee: None,
6733 next_holder_htlc_id: 0,
6734 next_counterparty_htlc_id: 0,
6735 update_time_counter: 1,
6737 resend_order: RAACommitmentOrder::CommitmentFirst,
6739 monitor_pending_channel_ready: false,
6740 monitor_pending_revoke_and_ack: false,
6741 monitor_pending_commitment_signed: false,
6742 monitor_pending_forwards: Vec::new(),
6743 monitor_pending_failures: Vec::new(),
6744 monitor_pending_finalized_fulfills: Vec::new(),
6746 signer_pending_commitment_update: false,
6747 signer_pending_funding: false,
6749 #[cfg(debug_assertions)]
6750 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6751 #[cfg(debug_assertions)]
6752 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6754 last_sent_closing_fee: None,
6755 pending_counterparty_closing_signed: None,
6756 expecting_peer_commitment_signed: false,
6757 closing_fee_limits: None,
6758 target_closing_feerate_sats_per_kw: None,
6760 funding_tx_confirmed_in: None,
6761 funding_tx_confirmation_height: 0,
6762 short_channel_id: None,
6763 channel_creation_height: current_chain_height,
6765 feerate_per_kw: msg.feerate_per_kw,
6766 channel_value_satoshis: msg.funding_satoshis,
6767 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6768 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6769 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6770 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6771 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6772 holder_selected_channel_reserve_satoshis,
6773 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6774 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6775 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6776 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6779 counterparty_forwarding_info: None,
6781 channel_transaction_parameters: ChannelTransactionParameters {
6782 holder_pubkeys: pubkeys,
6783 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6784 is_outbound_from_holder: false,
6785 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6786 selected_contest_delay: msg.to_self_delay,
6787 pubkeys: counterparty_pubkeys,
6789 funding_outpoint: None,
6790 channel_type_features: channel_type.clone()
6792 funding_transaction: None,
6793 is_batch_funding: None,
6795 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6796 counterparty_prev_commitment_point: None,
6797 counterparty_node_id,
6799 counterparty_shutdown_scriptpubkey,
6801 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6803 channel_update_status: ChannelUpdateStatus::Enabled,
6804 closing_signed_in_flight: false,
6806 announcement_sigs: None,
6808 #[cfg(any(test, fuzzing))]
6809 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6810 #[cfg(any(test, fuzzing))]
6811 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6813 workaround_lnd_bug_4006: None,
6814 sent_message_awaiting_response: None,
6816 latest_inbound_scid_alias: None,
6817 outbound_scid_alias: 0,
6819 channel_pending_event_emitted: false,
6820 channel_ready_event_emitted: false,
6822 #[cfg(any(test, fuzzing))]
6823 historical_inbound_htlc_fulfills: HashSet::new(),
6828 blocked_monitor_updates: Vec::new(),
6830 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
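// Hedged usage sketch (hypothetical caller): building the inbound side from a received
// `open_channel` message, where `our_supported_features` would typically come from
// `channelmanager::provided_channel_type_features(&config)`.
//
// let mut inbound_chan = InboundV1Channel::new(
// 	&fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
// 	&our_supported_features, &their_features, &open_channel_msg, user_id, &config,
// 	best_block_height, &logger, false /* is_0conf */,
// )?;
// let accept_channel_msg = inbound_chan.accept_inbound_channel();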
6836 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6837 /// should be sent back to the counterparty node.
6839 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6840 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
6841 if self.context.is_outbound() {
6842 panic!("Tried to send accept_channel for an outbound channel?");
6844 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6845 panic!("Tried to send accept_channel after channel had moved forward");
6847 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6848 panic!("Tried to send an accept_channel for a channel that has already advanced");
6851 self.generate_accept_channel_message()
6854 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6855 /// inbound channel. If the intention is to accept an inbound channel, use
6856 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6858 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6859 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6860 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6861 let keys = self.context.get_holder_pubkeys();
6863 msgs::AcceptChannel {
6864 temporary_channel_id: self.context.channel_id,
6865 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6866 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6867 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6868 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6869 minimum_depth: self.context.minimum_depth.unwrap(),
6870 to_self_delay: self.context.get_holder_selected_contest_delay(),
6871 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6872 funding_pubkey: keys.funding_pubkey,
6873 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6874 payment_point: keys.payment_point,
6875 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6876 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6877 first_per_commitment_point,
6878 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6879 Some(script) => script.clone().into_inner(),
6880 None => Builder::new().into_script(),
6882 channel_type: Some(self.context.channel_type.clone()),
6884 next_local_nonce: None,
6888 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6889 /// inbound channel without accepting it.
6891 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6893 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6894 self.generate_accept_channel_message()
6897 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6898 let funding_script = self.context.get_funding_redeemscript();
6900 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6901 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6902 let trusted_tx = initial_commitment_tx.trust();
6903 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6904 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6905 // They sign the holder commitment transaction...
6906 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6907 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6908 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6909 encode::serialize_hex(&funding_script), &self.context.channel_id());
6910 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6912 Ok(initial_commitment_tx)
6915 pub fn funding_created<L: Deref>(
6916 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6917 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
6921 if self.context.is_outbound() {
6922 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6924 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6925 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6926 // remember the channel, so it's safe to just send an error_message here and drop the channel.
6928 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
6930 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6931 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6932 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6933 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6936 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6937 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6938 // This is an externally observable change before we finish all our checks. In particular
6939 // check_funding_created_signature may fail.
6940 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6942 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
6944 Err(ChannelError::Close(e)) => {
6945 self.context.channel_transaction_parameters.funding_outpoint = None;
6946 return Err((self, ChannelError::Close(e)));
6949 // The only error we know how to handle is ChannelError::Close, so we fall over here
6950 // to make sure we don't continue with an inconsistent state.
6951 panic!("unexpected error type from check_funding_created_signature {:?}", e);
6955 let holder_commitment_tx = HolderCommitmentTransaction::new(
6956 initial_commitment_tx,
6959 &self.context.get_holder_pubkeys().funding_pubkey,
6960 self.context.counterparty_funding_pubkey()
6963 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6964 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6967 // Now that we're past error-generating stuff, update our local state:
6969 self.context.channel_state = ChannelState::FundingSent as u32;
6970 self.context.channel_id = funding_txo.to_channel_id();
6971 self.context.cur_counterparty_commitment_transaction_number -= 1;
6972 self.context.cur_holder_commitment_transaction_number -= 1;
6974 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6976 let funding_redeemscript = self.context.get_funding_redeemscript();
6977 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6978 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6979 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6980 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6981 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6982 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6983 shutdown_script, self.context.get_holder_selected_contest_delay(),
6984 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6985 &self.context.channel_transaction_parameters,
6986 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6988 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6989 let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
6990 channel_monitor.provide_initial_counterparty_commitment_tx(
6991 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
6992 self.context.cur_counterparty_commitment_transaction_number + 1,
6993 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
6994 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6995 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
6997 log_info!(logger, "{} funding_signed for peer for channel {}",
6998 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7000 // Promote the channel to a full-fledged one now that we have updated the state and have a
7001 // `ChannelMonitor`.
7002 let mut channel = Channel {
7003 context: self.context,
7005 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7006 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7008 Ok((channel, funding_signed, channel_monitor))
7012 const SERIALIZATION_VERSION: u8 = 3;
7013 const MIN_SERIALIZATION_VERSION: u8 = 3;
7015 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7021 impl Writeable for ChannelUpdateStatus {
7022 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7023 // We only care about writing out the current state as it was announced, i.e. only either
7024 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7025 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7027 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7028 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7029 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7030 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
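// In other words, a channel serialized while in DisabledStaged round-trips as Enabled and one
// in EnabledStaged round-trips as Disabled (see the Readable impl below); any staged
// transition is simply re-evaluated from peer connectivity after reload.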
7036 impl Readable for ChannelUpdateStatus {
7037 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7038 Ok(match <u8 as Readable>::read(reader)? {
7039 0 => ChannelUpdateStatus::Enabled,
7040 1 => ChannelUpdateStatus::Disabled,
7041 _ => return Err(DecodeError::InvalidValue),
7046 impl Writeable for AnnouncementSigsState {
7047 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7048 // We only care about writing out the current state as if we had just disconnected, at
7049 // which point we always set anything but PeerReceived to NotSent.
7051 AnnouncementSigsState::NotSent => 0u8.write(writer),
7052 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7053 AnnouncementSigsState::Committed => 0u8.write(writer),
7054 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7059 impl Readable for AnnouncementSigsState {
7060 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7061 Ok(match <u8 as Readable>::read(reader)? {
7062 0 => AnnouncementSigsState::NotSent,
7063 1 => AnnouncementSigsState::PeerReceived,
7064 _ => return Err(DecodeError::InvalidValue),
7069 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7070 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7071 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7074 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7076 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7077 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7078 // the low bytes now and the optional high bytes later.
7079 let user_id_low = self.context.user_id as u64;
7080 user_id_low.write(writer)?;
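// For illustration only, the split/recombination is effectively:
//     let low = user_id as u64;           // written here
//     let high = (user_id >> 64) as u64;  // written later as odd TLV type 25
//     let roundtrip = (low as u128) | ((high as u128) << 64);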
7082 // Version 1 deserializers expected to read parts of the config object here. Version 2
7083 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7084 // `minimum_depth` we simply write dummy values here.
7085 writer.write_all(&[0; 8])?;
7087 self.context.channel_id.write(writer)?;
7088 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
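// Always serializing with PeerDisconnected set means that, on reload, the channel must go
// through the normal channel_reestablish flow before it is used again.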
7089 self.context.channel_value_satoshis.write(writer)?;
7091 self.context.latest_monitor_update_id.write(writer)?;
7093 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7094 // deserialized from that format.
7095 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7096 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7097 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7099 self.context.destination_script.write(writer)?;
7101 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7102 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7103 self.context.value_to_self_msat.write(writer)?;
7105 let mut dropped_inbound_htlcs = 0;
7106 for htlc in self.context.pending_inbound_htlcs.iter() {
7107 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7108 dropped_inbound_htlcs += 1;
7111 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
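// RemoteAnnounced HTLCs are skipped below because, like the uncommitted fee update handled
// further down, they were never committed to in a commitment_signed; the counterparty will
// simply re-announce them on reconnect.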
7112 for htlc in self.context.pending_inbound_htlcs.iter() {
7113 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7116 htlc.htlc_id.write(writer)?;
7117 htlc.amount_msat.write(writer)?;
7118 htlc.cltv_expiry.write(writer)?;
7119 htlc.payment_hash.write(writer)?;
7121 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7122 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7124 htlc_state.write(writer)?;
7126 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7128 htlc_state.write(writer)?;
7130 &InboundHTLCState::Committed => {
7133 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7135 removal_reason.write(writer)?;
7140 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7141 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7142 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7144 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7145 for (idx, htlc) in self.context.pending_outbound_htlcs.iter().enumerate() {
7146 htlc.htlc_id.write(writer)?;
7147 htlc.amount_msat.write(writer)?;
7148 htlc.cltv_expiry.write(writer)?;
7149 htlc.payment_hash.write(writer)?;
7150 htlc.source.write(writer)?;
7152 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7154 onion_packet.write(writer)?;
7156 &OutboundHTLCState::Committed => {
7159 &OutboundHTLCState::RemoteRemoved(_) => {
7160 // Treat this as a Committed because we haven't received the CS - they'll
7161 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7164 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7166 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7167 preimages.push(preimage);
7169 let reason: Option<&HTLCFailReason> = outcome.into();
7170 reason.write(writer)?;
7172 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7174 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7175 preimages.push(preimage);
7177 let reason: Option<&HTLCFailReason> = outcome.into();
7178 reason.write(writer)?;
7181 if let Some(skimmed_fee) = htlc.skimmed_fee_msat {
7182 if pending_outbound_skimmed_fees.is_empty() {
7183 for _ in 0..idx { pending_outbound_skimmed_fees.push(None); }
7185 pending_outbound_skimmed_fees.push(Some(skimmed_fee));
7186 } else if !pending_outbound_skimmed_fees.is_empty() {
7187 pending_outbound_skimmed_fees.push(None);
7189 pending_outbound_blinding_points.push(htlc.blinding_point);
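// Note that the skimmed-fee vector above is built lazily: it stays empty until the first HTLC
// that actually has a skimmed fee, then is back-filled with `None` so that entry `i` always
// lines up with `pending_outbound_htlcs[i]`. An empty vector thus serializes as "no skimmed
// fees at all" rather than a list of `None`s.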
7192 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7193 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7194 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7195 for (idx, update) in self.context.holding_cell_htlc_updates.iter().enumerate() {
7197 &HTLCUpdateAwaitingACK::AddHTLC {
7198 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7199 blinding_point, skimmed_fee_msat,
7202 amount_msat.write(writer)?;
7203 cltv_expiry.write(writer)?;
7204 payment_hash.write(writer)?;
7205 source.write(writer)?;
7206 onion_routing_packet.write(writer)?;
7208 if let Some(skimmed_fee) = skimmed_fee_msat {
7209 if holding_cell_skimmed_fees.is_empty() {
7210 for _ in 0..idx { holding_cell_skimmed_fees.push(None); }
7212 holding_cell_skimmed_fees.push(Some(skimmed_fee));
7213 } else if !holding_cell_skimmed_fees.is_empty() { holding_cell_skimmed_fees.push(None); }
7215 holding_cell_blinding_points.push(blinding_point);
7217 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7219 payment_preimage.write(writer)?;
7220 htlc_id.write(writer)?;
7222 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7224 htlc_id.write(writer)?;
7225 err_packet.write(writer)?;
7230 match self.context.resend_order {
7231 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7232 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7235 self.context.monitor_pending_channel_ready.write(writer)?;
7236 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7237 self.context.monitor_pending_commitment_signed.write(writer)?;
7239 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7240 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7241 pending_forward.write(writer)?;
7242 htlc_id.write(writer)?;
7245 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7246 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7247 htlc_source.write(writer)?;
7248 payment_hash.write(writer)?;
7249 fail_reason.write(writer)?;
7252 if self.context.is_outbound() {
7253 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7254 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7255 Some(feerate).write(writer)?;
7257 // As for inbound HTLCs, if the update was only announced and never committed in a
7258 // commitment_signed, drop it.
7259 None::<u32>.write(writer)?;
7261 self.context.holding_cell_update_fee.write(writer)?;
7263 self.context.next_holder_htlc_id.write(writer)?;
7264 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7265 self.context.update_time_counter.write(writer)?;
7266 self.context.feerate_per_kw.write(writer)?;
7268 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7269 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7270 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7271 // consider the stale state on reload.
7274 self.context.funding_tx_confirmed_in.write(writer)?;
7275 self.context.funding_tx_confirmation_height.write(writer)?;
7276 self.context.short_channel_id.write(writer)?;
7278 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7279 self.context.holder_dust_limit_satoshis.write(writer)?;
7280 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7282 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7283 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7285 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7286 self.context.holder_htlc_minimum_msat.write(writer)?;
7287 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7289 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7290 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7292 match &self.context.counterparty_forwarding_info {
7295 info.fee_base_msat.write(writer)?;
7296 info.fee_proportional_millionths.write(writer)?;
7297 info.cltv_expiry_delta.write(writer)?;
7299 None => 0u8.write(writer)?
7302 self.context.channel_transaction_parameters.write(writer)?;
7303 self.context.funding_transaction.write(writer)?;
7305 self.context.counterparty_cur_commitment_point.write(writer)?;
7306 self.context.counterparty_prev_commitment_point.write(writer)?;
7307 self.context.counterparty_node_id.write(writer)?;
7309 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7311 self.context.commitment_secrets.write(writer)?;
7313 self.context.channel_update_status.write(writer)?;
7315 #[cfg(any(test, fuzzing))]
7316 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7317 #[cfg(any(test, fuzzing))]
7318 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7319 htlc.write(writer)?;
7322 // If the channel type is something other than only-static-remote-key, then we need to have
7323 // older clients fail to deserialize this channel at all. If the type is
7324 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7326 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7327 Some(&self.context.channel_type) } else { None };
7329 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7330 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7331 // a different percentage of the channel value than 10%, which older versions of LDK used
7332 // to set it to before the percentage was made configurable.
7333 let serialized_holder_selected_reserve =
7334 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7335 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7337 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7338 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7339 let serialized_holder_htlc_max_in_flight =
7340 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7341 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7343 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7344 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7346 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7347 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7348 // we write the high bytes as an option here.
7349 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7351 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
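// A note on the TLV types below: odd types may be safely ignored by readers which do not
// understand them, while even types (e.g. 2 for the channel type or 38 for batch funding)
// force older readers to fail, matching the "have older clients fail to deserialize" intent
// described above.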
7353 write_tlv_fields!(writer, {
7354 (0, self.context.announcement_sigs, option),
7355 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7356 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7357 // them twice, once with their original default values above, and once as an option
7358 // here. On the read side, old versions will simply ignore the odd-type entries here,
7359 // and new versions map the default values to None and allow the TLV entries here to override them.
7361 (1, self.context.minimum_depth, option),
7362 (2, chan_type, option),
7363 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7364 (4, serialized_holder_selected_reserve, option),
7365 (5, self.context.config, required),
7366 (6, serialized_holder_htlc_max_in_flight, option),
7367 (7, self.context.shutdown_scriptpubkey, option),
7368 (8, self.context.blocked_monitor_updates, optional_vec),
7369 (9, self.context.target_closing_feerate_sats_per_kw, option),
7370 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7371 (13, self.context.channel_creation_height, required),
7372 (15, preimages, required_vec),
7373 (17, self.context.announcement_sigs_state, required),
7374 (19, self.context.latest_inbound_scid_alias, option),
7375 (21, self.context.outbound_scid_alias, required),
7376 (23, channel_ready_event_emitted, option),
7377 (25, user_id_high_opt, option),
7378 (27, self.context.channel_keys_id, required),
7379 (28, holder_max_accepted_htlcs, option),
7380 (29, self.context.temporary_channel_id, option),
7381 (31, channel_pending_event_emitted, option),
7382 (35, pending_outbound_skimmed_fees, optional_vec),
7383 (37, holding_cell_skimmed_fees, optional_vec),
7384 (38, self.context.is_batch_funding, option),
7385 (39, pending_outbound_blinding_points, optional_vec),
7386 (41, holding_cell_blinding_points, optional_vec),
7393 const MAX_ALLOC_SIZE: usize = 64*1024;
7394 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7396 ES::Target: EntropySource,
7397 SP::Target: SignerProvider
7399 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7400 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7401 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7403 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7404 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7405 // the low bytes now and the high bytes later.
7406 let user_id_low: u64 = Readable::read(reader)?;
7408 let mut config = Some(LegacyChannelConfig::default());
7410 // Read the old serialization of the ChannelConfig from version 0.0.98.
7411 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7412 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7413 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7414 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7416 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7417 let mut _val: u64 = Readable::read(reader)?;
7420 let channel_id = Readable::read(reader)?;
7421 let channel_state = Readable::read(reader)?;
7422 let channel_value_satoshis = Readable::read(reader)?;
7424 let latest_monitor_update_id = Readable::read(reader)?;
7426 let mut keys_data = None;
7428 // Read the serialized signer bytes. We'll decide whether to deserialize them based on whether
7429 // the `channel_keys_id` TLV is present below.
7430 let keys_len: u32 = Readable::read(reader)?;
7431 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7432 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7433 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7434 let mut data = [0; 1024];
7435 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7436 reader.read_exact(read_slice)?;
7437 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
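// Capping the initial capacity at MAX_ALLOC_SIZE and then reading in 1KB chunks bounds the
// up-front allocation for a corrupted length prefix at 64KB; the buffer only grows further as
// real bytes are actually read from the stream.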
7441 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7442 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7443 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7446 let destination_script = Readable::read(reader)?;
7448 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7449 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7450 let value_to_self_msat = Readable::read(reader)?;
7452 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7454 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7455 for _ in 0..pending_inbound_htlc_count {
7456 pending_inbound_htlcs.push(InboundHTLCOutput {
7457 htlc_id: Readable::read(reader)?,
7458 amount_msat: Readable::read(reader)?,
7459 cltv_expiry: Readable::read(reader)?,
7460 payment_hash: Readable::read(reader)?,
7461 state: match <u8 as Readable>::read(reader)? {
7462 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7463 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7464 3 => InboundHTLCState::Committed,
7465 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7466 _ => return Err(DecodeError::InvalidValue),
7471 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7472 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7473 for _ in 0..pending_outbound_htlc_count {
7474 pending_outbound_htlcs.push(OutboundHTLCOutput {
7475 htlc_id: Readable::read(reader)?,
7476 amount_msat: Readable::read(reader)?,
7477 cltv_expiry: Readable::read(reader)?,
7478 payment_hash: Readable::read(reader)?,
7479 source: Readable::read(reader)?,
7480 state: match <u8 as Readable>::read(reader)? {
7481 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7482 1 => OutboundHTLCState::Committed,
7484 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7485 OutboundHTLCState::RemoteRemoved(option.into())
7488 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7489 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7492 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7493 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7495 _ => return Err(DecodeError::InvalidValue),
7497 skimmed_fee_msat: None,
7498 blinding_point: None,
7502 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7503 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7504 for _ in 0..holding_cell_htlc_update_count {
7505 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7506 0 => HTLCUpdateAwaitingACK::AddHTLC {
7507 amount_msat: Readable::read(reader)?,
7508 cltv_expiry: Readable::read(reader)?,
7509 payment_hash: Readable::read(reader)?,
7510 source: Readable::read(reader)?,
7511 onion_routing_packet: Readable::read(reader)?,
7512 skimmed_fee_msat: None,
7513 blinding_point: None,
7515 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7516 payment_preimage: Readable::read(reader)?,
7517 htlc_id: Readable::read(reader)?,
7519 2 => HTLCUpdateAwaitingACK::FailHTLC {
7520 htlc_id: Readable::read(reader)?,
7521 err_packet: Readable::read(reader)?,
7523 _ => return Err(DecodeError::InvalidValue),
7527 let resend_order = match <u8 as Readable>::read(reader)? {
7528 0 => RAACommitmentOrder::CommitmentFirst,
7529 1 => RAACommitmentOrder::RevokeAndACKFirst,
7530 _ => return Err(DecodeError::InvalidValue),
7533 let monitor_pending_channel_ready = Readable::read(reader)?;
7534 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7535 let monitor_pending_commitment_signed = Readable::read(reader)?;
7537 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7538 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7539 for _ in 0..monitor_pending_forwards_count {
7540 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7543 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7544 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7545 for _ in 0..monitor_pending_failures_count {
7546 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7549 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7551 let holding_cell_update_fee = Readable::read(reader)?;
7553 let next_holder_htlc_id = Readable::read(reader)?;
7554 let next_counterparty_htlc_id = Readable::read(reader)?;
7555 let update_time_counter = Readable::read(reader)?;
7556 let feerate_per_kw = Readable::read(reader)?;
7558 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7559 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7560 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7561 // consider the stale state on reload.
7562 match <u8 as Readable>::read(reader)? {
7565 let _: u32 = Readable::read(reader)?;
7566 let _: u64 = Readable::read(reader)?;
7567 let _: Signature = Readable::read(reader)?;
7569 _ => return Err(DecodeError::InvalidValue),
7572 let funding_tx_confirmed_in = Readable::read(reader)?;
7573 let funding_tx_confirmation_height = Readable::read(reader)?;
7574 let short_channel_id = Readable::read(reader)?;
7576 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7577 let holder_dust_limit_satoshis = Readable::read(reader)?;
7578 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7579 let mut counterparty_selected_channel_reserve_satoshis = None;
7581 // Read the old serialization from version 0.0.98.
7582 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7584 // Read the 8 bytes of backwards-compatibility data.
7585 let _dummy: u64 = Readable::read(reader)?;
7587 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7588 let holder_htlc_minimum_msat = Readable::read(reader)?;
7589 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7591 let mut minimum_depth = None;
7593 // Read the old serialization from version 0.0.98.
7594 minimum_depth = Some(Readable::read(reader)?);
7596 // Read the 4 bytes of backwards-compatibility data.
7597 let _dummy: u32 = Readable::read(reader)?;
7600 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7602 1 => Some(CounterpartyForwardingInfo {
7603 fee_base_msat: Readable::read(reader)?,
7604 fee_proportional_millionths: Readable::read(reader)?,
7605 cltv_expiry_delta: Readable::read(reader)?,
7607 _ => return Err(DecodeError::InvalidValue),
7610 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7611 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7613 let counterparty_cur_commitment_point = Readable::read(reader)?;
7615 let counterparty_prev_commitment_point = Readable::read(reader)?;
7616 let counterparty_node_id = Readable::read(reader)?;
7618 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7619 let commitment_secrets = Readable::read(reader)?;
7621 let channel_update_status = Readable::read(reader)?;
7623 #[cfg(any(test, fuzzing))]
7624 let mut historical_inbound_htlc_fulfills = HashSet::new();
7625 #[cfg(any(test, fuzzing))]
7627 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7628 for _ in 0..htlc_fulfills_len {
7629 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7633 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7634 Some((feerate, if channel_parameters.is_outbound_from_holder {
7635 FeeUpdateState::Outbound
7637 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7643 let mut announcement_sigs = None;
7644 let mut target_closing_feerate_sats_per_kw = None;
7645 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7646 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7647 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7648 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7649 // only, so we default to that if none was written.
7650 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7651 let mut channel_creation_height = Some(serialized_height);
7652 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7654 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7655 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7656 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7657 let mut latest_inbound_scid_alias = None;
7658 let mut outbound_scid_alias = None;
7659 let mut channel_pending_event_emitted = None;
7660 let mut channel_ready_event_emitted = None;
7662 let mut user_id_high_opt: Option<u64> = None;
7663 let mut channel_keys_id: Option<[u8; 32]> = None;
7664 let mut temporary_channel_id: Option<ChannelId> = None;
7665 let mut holder_max_accepted_htlcs: Option<u16> = None;
7667 let mut blocked_monitor_updates = Some(Vec::new());
7669 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7670 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7672 let mut is_batch_funding: Option<()> = None;
7674 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7675 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7677 read_tlv_fields!(reader, {
7678 (0, announcement_sigs, option),
7679 (1, minimum_depth, option),
7680 (2, channel_type, option),
7681 (3, counterparty_selected_channel_reserve_satoshis, option),
7682 (4, holder_selected_channel_reserve_satoshis, option),
7683 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7684 (6, holder_max_htlc_value_in_flight_msat, option),
7685 (7, shutdown_scriptpubkey, option),
7686 (8, blocked_monitor_updates, optional_vec),
7687 (9, target_closing_feerate_sats_per_kw, option),
7688 (11, monitor_pending_finalized_fulfills, optional_vec),
7689 (13, channel_creation_height, option),
7690 (15, preimages_opt, optional_vec),
7691 (17, announcement_sigs_state, option),
7692 (19, latest_inbound_scid_alias, option),
7693 (21, outbound_scid_alias, option),
7694 (23, channel_ready_event_emitted, option),
7695 (25, user_id_high_opt, option),
7696 (27, channel_keys_id, option),
7697 (28, holder_max_accepted_htlcs, option),
7698 (29, temporary_channel_id, option),
7699 (31, channel_pending_event_emitted, option),
7700 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7701 (37, holding_cell_skimmed_fees_opt, optional_vec),
7702 (38, is_batch_funding, option),
7703 (39, pending_outbound_blinding_points_opt, optional_vec),
7704 (41, holding_cell_blinding_points_opt, optional_vec),
7707 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7708 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7709 // If we've gotten to the funding stage of the channel, populate the signer with its
7710 // required channel parameters.
7711 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7712 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7713 holder_signer.provide_channel_parameters(&channel_parameters);
7715 (channel_keys_id, holder_signer)
7717 // `keys_data` can be `None` if we had corrupted data.
7718 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7719 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7720 (holder_signer.channel_keys_id(), holder_signer)
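// HTLCs deserialized above with a `Success(None)` placeholder outcome are now paired back up,
// in order, with the preimages stashed in TLV type 15; any count mismatch is rejected as a
// corrupt serialization.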
7723 if let Some(preimages) = preimages_opt {
7724 let mut iter = preimages.into_iter();
7725 for htlc in pending_outbound_htlcs.iter_mut() {
7727 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7728 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7730 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7731 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7736 // We expect all preimages to be consumed above
7737 if iter.next().is_some() {
7738 return Err(DecodeError::InvalidValue);
7742 let chan_features = channel_type.as_ref().unwrap();
7743 if !chan_features.is_subset(our_supported_features) {
7744 // If the channel was written by a new version and negotiated with features we don't
7745 // understand yet, refuse to read it.
7746 return Err(DecodeError::UnknownRequiredFeature);
7749 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7750 // To account for that, we're proactively setting/overriding the field here.
7751 channel_parameters.channel_type_features = chan_features.clone();
7753 let mut secp_ctx = Secp256k1::new();
7754 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7756 // `user_id` used to be a single u64 value. In order to remain backwards
7757 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7758 // separate u64 values.
7759 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7761 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7763 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7764 let mut iter = skimmed_fees.into_iter();
7765 for htlc in pending_outbound_htlcs.iter_mut() {
7766 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7768 // We expect all skimmed fees to be consumed above
7769 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7771 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7772 let mut iter = skimmed_fees.into_iter();
7773 for htlc in holding_cell_htlc_updates.iter_mut() {
7774 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7775 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7778 // We expect all skimmed fees to be consumed above
7779 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7781 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
7782 let mut iter = blinding_pts.into_iter();
7783 for htlc in pending_outbound_htlcs.iter_mut() {
7784 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7786 // We expect all blinding points to be consumed above
7787 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7789 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
7790 let mut iter = blinding_pts.into_iter();
7791 for htlc in holding_cell_htlc_updates.iter_mut() {
7792 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
7793 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7796 // We expect all blinding points to be consumed above
7797 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7801 context: ChannelContext {
7804 config: config.unwrap(),
7808 // Note that we don't care about serializing handshake limits as we only ever serialize
7809 // channel data after the handshake has completed.
7810 inbound_handshake_limits_override: None,
7813 temporary_channel_id,
7815 announcement_sigs_state: announcement_sigs_state.unwrap(),
7817 channel_value_satoshis,
7819 latest_monitor_update_id,
7821 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7822 shutdown_scriptpubkey,
7825 cur_holder_commitment_transaction_number,
7826 cur_counterparty_commitment_transaction_number,
7829 holder_max_accepted_htlcs,
7830 pending_inbound_htlcs,
7831 pending_outbound_htlcs,
7832 holding_cell_htlc_updates,
7836 monitor_pending_channel_ready,
7837 monitor_pending_revoke_and_ack,
7838 monitor_pending_commitment_signed,
7839 monitor_pending_forwards,
7840 monitor_pending_failures,
7841 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7843 signer_pending_commitment_update: false,
7844 signer_pending_funding: false,
7847 holding_cell_update_fee,
7848 next_holder_htlc_id,
7849 next_counterparty_htlc_id,
7850 update_time_counter,
7853 #[cfg(debug_assertions)]
7854 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7855 #[cfg(debug_assertions)]
7856 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7858 last_sent_closing_fee: None,
7859 pending_counterparty_closing_signed: None,
7860 expecting_peer_commitment_signed: false,
7861 closing_fee_limits: None,
7862 target_closing_feerate_sats_per_kw,
7864 funding_tx_confirmed_in,
7865 funding_tx_confirmation_height,
7867 channel_creation_height: channel_creation_height.unwrap(),
7869 counterparty_dust_limit_satoshis,
7870 holder_dust_limit_satoshis,
7871 counterparty_max_htlc_value_in_flight_msat,
7872 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7873 counterparty_selected_channel_reserve_satoshis,
7874 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7875 counterparty_htlc_minimum_msat,
7876 holder_htlc_minimum_msat,
7877 counterparty_max_accepted_htlcs,
7880 counterparty_forwarding_info,
7882 channel_transaction_parameters: channel_parameters,
7883 funding_transaction,
7886 counterparty_cur_commitment_point,
7887 counterparty_prev_commitment_point,
7888 counterparty_node_id,
7890 counterparty_shutdown_scriptpubkey,
7894 channel_update_status,
7895 closing_signed_in_flight: false,
7899 #[cfg(any(test, fuzzing))]
7900 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7901 #[cfg(any(test, fuzzing))]
7902 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7904 workaround_lnd_bug_4006: None,
7905 sent_message_awaiting_response: None,
7907 latest_inbound_scid_alias,
7908 // Later, in the ChannelManager deserialization phase, we scan for channels and assign scid aliases if it's missing.
7909 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7911 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7912 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7914 #[cfg(any(test, fuzzing))]
7915 historical_inbound_htlc_fulfills,
7917 channel_type: channel_type.unwrap(),
7920 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7929 use bitcoin::blockdata::constants::ChainHash;
7930 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7931 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7932 use bitcoin::blockdata::opcodes;
7933 use bitcoin::network::constants::Network;
7934 use crate::ln::{PaymentHash, PaymentPreimage};
7935 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7936 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7937 use crate::ln::channel::InitFeatures;
7938 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
7939 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7940 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
7941 use crate::ln::msgs;
7942 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7943 use crate::ln::script::ShutdownScript;
7944 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7945 use crate::chain::BestBlock;
7946 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7947 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7948 use crate::chain::transaction::OutPoint;
7949 use crate::routing::router::{Path, RouteHop};
7950 use crate::util::config::UserConfig;
7951 use crate::util::errors::APIError;
7952 use crate::util::ser::{ReadableArgs, Writeable};
7953 use crate::util::test_utils;
7954 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7955 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7956 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7957 use bitcoin::secp256k1::{SecretKey,PublicKey};
7958 use bitcoin::hashes::sha256::Hash as Sha256;
7959 use bitcoin::hashes::Hash;
7960 use bitcoin::hashes::hex::FromHex;
7961 use bitcoin::hash_types::WPubkeyHash;
7962 use bitcoin::blockdata::locktime::absolute::LockTime;
7963 use bitcoin::address::{WitnessProgram, WitnessVersion};
7964 use crate::prelude::*;
7966 struct TestFeeEstimator {
7969 impl FeeEstimator for TestFeeEstimator {
7970 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7976 fn test_max_funding_satoshis_no_wumbo() {
7977 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7978 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7979 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7983 signer: InMemorySigner,
7986 impl EntropySource for Keys {
7987 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7990 impl SignerProvider for Keys {
7991 type EcdsaSigner = InMemorySigner;
7993 type TaprootSigner = InMemorySigner;
7995 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7996 self.signer.channel_keys_id()
7999 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8003 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8005 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8006 let secp_ctx = Secp256k1::signing_only();
8007 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8008 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8009 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8012 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8013 let secp_ctx = Secp256k1::signing_only();
8014 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8015 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8019 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8020 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8021 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8025 fn upfront_shutdown_script_incompatibility() {
8026 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8027 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8028 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8031 let seed = [42; 32];
8032 let network = Network::Testnet;
8033 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8034 keys_provider.expect(OnGetShutdownScriptpubkey {
8035 returns: non_v0_segwit_shutdown_script.clone(),
8038 let secp_ctx = Secp256k1::new();
8039 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8040 let config = UserConfig::default();
8041 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8042 Err(APIError::IncompatibleShutdownScript { script }) => {
8043 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8045 Err(e) => panic!("Unexpected error: {:?}", e),
8046 Ok(_) => panic!("Expected error"),
8050 // Check that, during channel creation, we use the same feerate in the open channel message
8051 // as we do in the Channel object creation itself.
8053 fn test_open_channel_msg_fee() {
8054 let original_fee = 253;
8055 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8056 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8057 let secp_ctx = Secp256k1::new();
8058 let seed = [42; 32];
8059 let network = Network::Testnet;
8060 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8062 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8063 let config = UserConfig::default();
8064 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8066 // Now change the fee so we can check that the fee in the open_channel message is the
8067 // same as the old fee.
8068 fee_est.fee_est = 500;
8069 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8070 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8074 fn test_holder_vs_counterparty_dust_limit() {
8075 // Test that when calculating the local and remote commitment transaction fees, the correct
8076 // dust limits are used.
8077 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8078 let secp_ctx = Secp256k1::new();
8079 let seed = [42; 32];
8080 let network = Network::Testnet;
8081 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8082 let logger = test_utils::TestLogger::new();
8083 let best_block = BestBlock::from_network(network);
8085 // Go through the flow of opening a channel between two nodes, making sure
8086 // they have different dust limits.
8088 // Create Node A's channel pointing to Node B's pubkey
8089 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8090 let config = UserConfig::default();
8091 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8093 // Create Node B's channel by receiving Node A's open_channel message
8094 // Make sure A's dust limit is as we expect.
8095 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8096 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8097 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8099 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8100 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8101 accept_channel_msg.dust_limit_satoshis = 546;
8102 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8103 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8105 // Node A --> Node B: funding created
8106 let output_script = node_a_chan.context.get_funding_redeemscript();
8107 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8108 value: 10000000, script_pubkey: output_script.clone(),
8110 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8111 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8112 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8114 // Node B --> Node A: funding signed
8115 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8117 // Put some inbound and outbound HTLCs in A's channel.
8118 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8119 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8121 amount_msat: htlc_amount_msat,
8122 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8123 cltv_expiry: 300000000,
8124 state: InboundHTLCState::Committed,
8127 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8129 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8130 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8131 cltv_expiry: 200000000,
8132 state: OutboundHTLCState::Committed,
8133 source: HTLCSource::OutboundRoute {
8134 path: Path { hops: Vec::new(), blinded_tail: None },
8135 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8136 first_hop_htlc_msat: 548,
8137 payment_id: PaymentId([42; 32]),
8139 skimmed_fee_msat: None,
8140 blinding_point: None,
8143 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8144 // the dust limit check.
8145 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8146 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8147 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8148 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8150 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8151 // of the HTLCs are seen to be above the dust limit.
8152 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8153 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8154 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8155 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8156 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8160 fn test_timeout_vs_success_htlc_dust_limit() {
8161 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8162 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8163 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8164 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
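// Roughly, an offered HTLC on the holder's commitment is non-dust only if its value exceeds
// the holder dust limit plus the fee for an HTLC-timeout transaction at the current feerate
// (feerate * htlc_timeout_tx_weight / 1000), while a received HTLC uses the heavier
// HTLC-success weight; on the counterparty's commitment the weights and dust limit flip.
// The amounts below are chosen to sit one satoshi above or below those thresholds.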
8165 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8166 let secp_ctx = Secp256k1::new();
8167 let seed = [42; 32];
8168 let network = Network::Testnet;
8169 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8171 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8172 let config = UserConfig::default();
8173 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8175 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8176 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8178 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8179 // counted as dust when it shouldn't be.
8180 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8181 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8182 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8183 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8185 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8186 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8187 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8188 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8189 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8191 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8193 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8194 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8195 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8196 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8197 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8199 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8200 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8201 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8202 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8203 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8207 fn channel_reestablish_no_updates() {
8208 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8209 let logger = test_utils::TestLogger::new();
8210 let secp_ctx = Secp256k1::new();
8211 let seed = [42; 32];
8212 let network = Network::Testnet;
8213 let best_block = BestBlock::from_network(network);
8214 let chain_hash = ChainHash::using_genesis_block(network);
8215 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8217 // Go through the flow of opening a channel between two nodes.
8219 // Create Node A's channel pointing to Node B's pubkey
8220 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8221 let config = UserConfig::default();
8222 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8224 // Create Node B's channel by receiving Node A's open_channel message
8225 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8226 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8227 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8229 // Node B --> Node A: accept channel
8230 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8231 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8233 // Node A --> Node B: funding created
8234 let output_script = node_a_chan.context.get_funding_redeemscript();
8235 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8236 value: 10000000, script_pubkey: output_script.clone(),
8238 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8239 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8240 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8242 // Node B --> Node A: funding signed
8243 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8245 // Now disconnect the two nodes and check that the commitment point in
8246 // Node B's channel_reestablish message is sane.
8247 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8248 let msg = node_b_chan.get_channel_reestablish(&&logger);
8249 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8250 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8251 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Check that the commitment point in Node A's channel_reestablish message
// is sane.
8255 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8256 let msg = node_a_chan.get_channel_reestablish(&&logger);
8257 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8258 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8259 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
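// Interpretation of the values checked above (background, not additional assertions):
// `next_local_commitment_number` (`next_commitment_number` in the current spec) is 1 because
// only the initial commitment transaction (number 0) has been signed so far, while
// `next_remote_commitment_number` (`next_revocation_number`) is 0 and
// `your_last_per_commitment_secret` is all zeroes because neither side has revoked a
// commitment yet.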
}

#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
8264 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8265 let logger = test_utils::TestLogger::new();
8266 let secp_ctx = Secp256k1::new();
8267 let seed = [42; 32];
8268 let network = Network::Testnet;
8269 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8270 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8271 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8273 let mut config_2_percent = UserConfig::default();
8274 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8275 let mut config_99_percent = UserConfig::default();
8276 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8277 let mut config_0_percent = UserConfig::default();
8278 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8279 let mut config_101_percent = UserConfig::default();
8280 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8282 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8283 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8284 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8285 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8286 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8287 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8289 // Test with the upper bound - 1 of valid values (99%).
8290 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8291 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8292 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8294 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8296 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8297 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
8299 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8300 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8301 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8303 // Test with the upper bound - 1 of valid values (99%).
8304 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8305 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8306 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8308 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8309 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8310 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8311 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8312 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8314 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8317 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8318 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8319 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8321 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8322 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8323 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8324 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8325 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8327 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8330 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8331 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8332 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
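// Illustrative sketch (not an LDK helper): the behaviour exercised above is equivalent to
// clamping the configured `max_inbound_htlc_value_in_flight_percent_of_channel` into the
// [1, 100] range before applying it to the channel value. `expected_in_flight` is a
// hypothetical local closure used only to restate that relationship.
let expected_in_flight = |channel_value_msat: u64, configured_percent: u64| -> u64 {
let percent = configured_percent.clamp(1, 100);
channel_value_msat * percent / 100
};
assert_eq!(expected_in_flight(chan_5_value_msat, 0), (chan_5_value_msat as f64 * 0.01) as u64);
assert_eq!(expected_in_flight(chan_8_value_msat, 101), chan_8_value_msat);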
}

#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {
8338 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8339 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8340 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8342 // Test with valid but unreasonably high channel reserves
// Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves.
8344 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8345 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8347 // Test with calculated channel reserve less than lower bound
8348 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8349 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
// Test with invalid channel reserves since the sum of both is greater than or equal
// to the channel value.
8353 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8354 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
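// As a rough guide to what the helper below checks: each side's selected reserve is expected
// to be max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, channel_value_satoshis *
// their_channel_reserve_proportional_millionths / 1_000_000), and the inbound handshake is
// expected to fail when the two selected reserves sum to the full channel value or more.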
}

fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8358 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8359 let logger = test_utils::TestLogger::new();
8360 let secp_ctx = Secp256k1::new();
8361 let seed = [42; 32];
8362 let network = Network::Testnet;
8363 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8364 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8365 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8368 let mut outbound_node_config = UserConfig::default();
8369 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8370 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8372 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8373 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8375 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8376 let mut inbound_node_config = UserConfig::default();
8377 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8379 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8380 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8382 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8384 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8385 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
// Channel negotiations failed
8388 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8389 assert!(result.is_err());
}
}

#[test]
fn channel_update() {
8395 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8396 let logger = test_utils::TestLogger::new();
8397 let secp_ctx = Secp256k1::new();
8398 let seed = [42; 32];
8399 let network = Network::Testnet;
8400 let best_block = BestBlock::from_network(network);
8401 let chain_hash = ChainHash::using_genesis_block(network);
8402 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8404 // Create Node A's channel pointing to Node B's pubkey
8405 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8406 let config = UserConfig::default();
8407 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8409 // Create Node B's channel by receiving Node A's open_channel message
8410 // Make sure A's dust limit is as we expect.
8411 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8412 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8413 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8415 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8416 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8417 accept_channel_msg.dust_limit_satoshis = 546;
8418 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8419 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8421 // Node A --> Node B: funding created
8422 let output_script = node_a_chan.context.get_funding_redeemscript();
8423 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
8426 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8427 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8428 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8430 // Node B --> Node A: funding signed
8431 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8433 // Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
contents: UnsignedChannelUpdate {
chain_hash,
short_channel_id: 0,
timestamp: 0,
flags: 0,
cltv_expiry_delta: 100,
htlc_minimum_msat: 5,
htlc_maximum_msat: MAX_VALUE_MSAT,
fee_base_msat: 110,
fee_proportional_millionths: 11,
excess_data: Vec::new(),
},
signature: Signature::from(unsafe { FFISignature::new() })
};
assert!(node_a_chan.channel_update(&update).unwrap());
8451 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8452 // change our official htlc_minimum_msat.
8453 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
Some(info) => {
assert_eq!(info.cltv_expiry_delta, 100);
assert_eq!(info.fee_base_msat, 110);
assert_eq!(info.fee_proportional_millionths, 11);
},
None => panic!("expected counterparty forwarding info to be Some")
}
8463 assert!(!node_a_chan.channel_update(&update).unwrap());
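// Interpretation of the two asserts above (an assumption about the return value, not spelled
// out elsewhere in this test): `channel_update` appears to report whether the counterparty's
// forwarding info actually changed, so applying `update` the first time returns true and
// re-applying the identical update returns false.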
}

#[test]
fn blinding_point_ser() {
8468 // Ensure that channel blinding points are (de)serialized properly.
8469 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8470 let secp_ctx = Secp256k1::new();
8471 let seed = [42; 32];
8472 let network = Network::Testnet;
8473 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8475 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8476 let config = UserConfig::default();
8477 let features = channelmanager::provided_init_features(&config);
8478 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8479 let mut chan = Channel { context: outbound_chan.context };
let dummy_htlc_source = HTLCSource::OutboundRoute {
path: Path {
hops: vec![RouteHop {
pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
cltv_expiry_delta: 0, maybe_announced_channel: false,
}],
blinded_tail: None
},
session_priv: test_utils::privkey(42),
first_hop_htlc_msat: 0,
payment_id: PaymentId([42; 32]),
};
let dummy_outbound_output = OutboundHTLCOutput {
htlc_id: 0,
amount_msat: 0,
payment_hash: PaymentHash([43; 32]),
cltv_expiry: 0,
state: OutboundHTLCState::Committed,
source: dummy_htlc_source.clone(),
skimmed_fee_msat: None,
blinding_point: None,
};
8504 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
if idx % 2 == 0 {
htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
}
}
8510 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
amount_msat: 0,
cltv_expiry: 0,
payment_hash: PaymentHash([43; 32]),
source: dummy_htlc_source.clone(),
onion_routing_packet: msgs::OnionPacket {
version: 0,
public_key: Ok(test_utils::pubkey(1)),
hop_data: [0; 20*65],
hmac: [0; 32],
},
skimmed_fee_msat: None,
blinding_point: None,
};
let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: PaymentPreimage([42; 32]),
htlc_id: 0,
};
let mut holding_cell_htlc_updates = Vec::with_capacity(10);
for i in 0..10 {
if i % 3 == 0 {
holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
} else if i % 3 == 1 {
holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
} else {
let mut dummy_add = dummy_holding_cell_add_htlc.clone();
if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = &mut dummy_add {
*blinding_point = Some(test_utils::pubkey(42 + i));
} else { panic!() }
holding_cell_htlc_updates.push(dummy_add);
}
}
8544 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8546 // Encode and decode the channel and ensure that the HTLCs within are the same.
8547 let encoded_chan = chan.encode();
8548 let mut s = crate::io::Cursor::new(&encoded_chan);
8549 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8550 let features = channelmanager::provided_channel_type_features(&config);
8551 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8552 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8553 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
}

#[cfg(feature = "_test_vectors")]
#[test]
8558 fn outbound_commitment_test() {
8559 use bitcoin::sighash;
8560 use bitcoin::consensus::encode::serialize;
8561 use bitcoin::sighash::EcdsaSighashType;
8562 use bitcoin::hashes::hex::FromHex;
8563 use bitcoin::hash_types::Txid;
8564 use bitcoin::secp256k1::Message;
8565 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8566 use crate::ln::PaymentPreimage;
8567 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8568 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8569 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8570 use crate::util::logger::Logger;
8571 use crate::sync::Arc;
8572 use core::str::FromStr;
8573 use hex::DisplayHex;
8575 // Test vectors from BOLT 3 Appendices C and F (anchors):
8576 let feeest = TestFeeEstimator{fee_est: 15000};
8577 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8578 let secp_ctx = Secp256k1::new();
8580 let mut signer = InMemorySigner::new(
8582 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8583 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8584 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8585 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8586 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8588 // These aren't set in the test vectors:
8589 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8595 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8596 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8597 let keys_provider = Keys { signer: signer.clone() };
8599 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8600 let mut config = UserConfig::default();
8601 config.channel_handshake_config.announced_channel = false;
8602 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8603 chan.context.holder_dust_limit_satoshis = 546;
8604 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8606 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8608 let counterparty_pubkeys = ChannelPublicKeys {
8609 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8610 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8611 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8612 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
8615 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8616 CounterpartyChannelTransactionParameters {
8617 pubkeys: counterparty_pubkeys.clone(),
selected_contest_delay: 144
});
8620 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8621 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8623 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8624 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8626 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8627 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8629 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8630 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8632 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8633 // derived from a commitment_seed, so instead we copy it here and call
8634 // build_commitment_transaction.
8635 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8636 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8637 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8638 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8639 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8641 macro_rules! test_commitment {
8642 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8643 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
};
}
8648 macro_rules! test_commitment_with_anchors {
8649 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8650 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
};
}
8655 macro_rules! test_commitment_common {
8656 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
8659 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8660 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8662 let htlcs = commitment_stats.htlcs_included.drain(..)
.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
.collect();
(commitment_stats.tx, htlcs)
};
8667 let trusted_tx = commitment_tx.trust();
8668 let unsigned_tx = trusted_tx.built_transaction();
8669 let redeemscript = chan.context.get_funding_redeemscript();
8670 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8671 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8672 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8673 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8675 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8676 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8677 let mut counterparty_htlc_sigs = Vec::new();
8678 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8680 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8681 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8682 counterparty_htlc_sigs.push(remote_signature);
8684 assert_eq!(htlcs.len(), per_htlc.len());
8686 let holder_commitment_tx = HolderCommitmentTransaction::new(
8687 commitment_tx.clone(),
8688 counterparty_signature,
8689 counterparty_htlc_sigs,
8690 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8691 chan.context.counterparty_funding_pubkey()
8693 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8694 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8696 let funding_redeemscript = chan.context.get_funding_redeemscript();
8697 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8698 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8700 // ((htlc, counterparty_sig), (index, holder_sig))
8701 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8704 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8705 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8707 let ref htlc = htlcs[$htlc_idx];
8708 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8709 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8710 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8711 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8712 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
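// Background on the sighash choice above (per BOLT 3, not asserted by the vectors): anchor /
// zero-fee-HTLC channels sign second-stage HTLC transactions with
// SIGHASH_SINGLE | SIGHASH_ANYONECANPAY so the broadcaster can attach additional inputs and
// outputs to bump the fee, whereas legacy channels sign them with SIGHASH_ALL.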
8713 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8714 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
for i in 0..5 {
let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
if out == htlc.payment_hash {
preimage = Some(PaymentPreimage([i; 32]));
}
}
assert!(preimage.is_some());
}
8728 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8729 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8730 channel_derivation_parameters: ChannelDerivationParameters {
8731 value_satoshis: chan.context.channel_value_satoshis,
8732 keys_id: chan.context.channel_keys_id,
8733 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
},
commitment_txid: trusted_tx.txid(),
8736 per_commitment_number: trusted_tx.commitment_number(),
8737 per_commitment_point: trusted_tx.per_commitment_point(),
8738 feerate_per_kw: trusted_tx.feerate_per_kw(),
htlc: htlc.clone(),
preimage: preimage.clone(),
8741 counterparty_sig: *htlc_counterparty_sig,
8742 }, &secp_ctx).unwrap();
8743 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8744 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8746 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
8747 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8748 let trusted_tx = holder_commitment_tx.trust();
8749 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8750 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
8751 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
8753 assert!(htlc_counterparty_sig_iter.next().is_none());
8757 // anchors: simple commitment tx with no HTLCs and single anchor
8758 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8759 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8760 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8762 // simple commitment tx with no HTLCs
8763 chan.context.value_to_self_msat = 7000000000;
8765 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8766 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8767 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8769 // anchors: simple commitment tx with no HTLCs
8770 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8771 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8772 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8774 chan.context.pending_inbound_htlcs.push({
8775 let mut out = InboundHTLCOutput{
8777 amount_msat: 1000000,
8779 payment_hash: PaymentHash([0; 32]),
8780 state: InboundHTLCState::Committed,
8782 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
8785 chan.context.pending_inbound_htlcs.push({
8786 let mut out = InboundHTLCOutput{
8788 amount_msat: 2000000,
8790 payment_hash: PaymentHash([0; 32]),
8791 state: InboundHTLCState::Committed,
8793 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
8796 chan.context.pending_outbound_htlcs.push({
8797 let mut out = OutboundHTLCOutput{
8799 amount_msat: 2000000,
8801 payment_hash: PaymentHash([0; 32]),
8802 state: OutboundHTLCState::Committed,
8803 source: HTLCSource::dummy(),
8804 skimmed_fee_msat: None,
8805 blinding_point: None,
8807 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
8810 chan.context.pending_outbound_htlcs.push({
8811 let mut out = OutboundHTLCOutput{
8813 amount_msat: 3000000,
8815 payment_hash: PaymentHash([0; 32]),
8816 state: OutboundHTLCState::Committed,
8817 source: HTLCSource::dummy(),
8818 skimmed_fee_msat: None,
8819 blinding_point: None,
8821 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
8824 chan.context.pending_inbound_htlcs.push({
8825 let mut out = InboundHTLCOutput{
8827 amount_msat: 4000000,
8829 payment_hash: PaymentHash([0; 32]),
8830 state: InboundHTLCState::Committed,
8832 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
8836 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8837 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8838 chan.context.feerate_per_kw = 0;
8840 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8841 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8842 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8845 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8846 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8847 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8850 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8851 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8852 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8855 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8856 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8857 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8860 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8861 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8862 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8865 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8866 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8867 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8870 // commitment tx with seven outputs untrimmed (maximum feerate)
8871 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8872 chan.context.feerate_per_kw = 647;
8874 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8875 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8876 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8879 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8880 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8881 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8884 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8885 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8886 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8889 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8890 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8891 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8894 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8895 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8896 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8899 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8900 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8901 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8904 // commitment tx with six outputs untrimmed (minimum feerate)
8905 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8906 chan.context.feerate_per_kw = 648;
8908 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8909 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8910 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8913 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8914 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8915 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8918 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8919 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8920 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8923 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8924 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8925 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8928 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8929 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8930 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8933 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8934 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8935 chan.context.feerate_per_kw = 645;
8936 chan.context.holder_dust_limit_satoshis = 1001;
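// A note for reading the anchors variants below (author's gloss, not text from the spec):
// with `option_anchors_zero_fee_htlc_tx` the second-stage HTLC-success/timeout
// transactions pay no fee, so an HTLC output is trimmed purely when its value is below
// `holder_dust_limit_satoshis`. These cases are therefore labelled "(minimum dust limit)"
// and bump the dust limit (1001 sat here, just above the 1000 sat HTLC from the BOLT #3
// Appendix C HTLC set) rather than relying on the feerate to trim outputs.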
8938 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8939 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8940 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8943 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8944 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8945 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8948 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8949 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8950 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8953 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8954 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8955 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8958 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8959 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8960 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8963 // commitment tx with six outputs untrimmed (maximum feerate)
8964 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8965 chan.context.feerate_per_kw = 2069;
8966 chan.context.holder_dust_limit_satoshis = 546;
8968 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8969 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8970 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8973 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8974 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8975 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8978 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8979 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8980 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8983 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8984 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8985 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8988 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8989 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8990 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8993 // commitment tx with five outputs untrimmed (minimum feerate)
8994 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8995 chan.context.feerate_per_kw = 2070;
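// Rough arithmetic behind the six-to-five-output transition (a sketch using the BOLT #3
// non-anchors weights, not values computed by this test): a received HTLC is kept only if
// its amount reaches `dust_limit + htlc_success_tx_weight * feerate_per_kw / 1000`.
//   at 2069 sat/kWU: 546 + 703 * 2069 / 1000 = 546 + 1454 = 2000 sat, so the 2000 sat
//     inbound HTLC is still (just) untrimmed, giving the six outputs above;
//   at 2070 sat/kWU: 546 + 703 * 2070 / 1000 = 546 + 1455 = 2001 sat, so it is trimmed,
//     leaving the five outputs checked here.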
8997 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
8998 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
8999 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9002 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9003 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9004 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9007 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9008 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9009 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9012 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9013 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9014 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9017 // commitment tx with five outputs untrimmed (maximum feerate)
9018 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9019 chan.context.feerate_per_kw = 2194;
9021 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9022 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9023 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9026 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9027 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9028 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9031 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9032 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9033 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9036 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9037 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9038 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9041 // commitment tx with four outputs untrimmed (minimum feerate)
9042 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9043 chan.context.feerate_per_kw = 2195;
9045 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9046 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9047 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9050 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9051 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9052 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9055 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9056 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9057 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9060 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9061 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9062 chan.context.feerate_per_kw = 2185;
9063 chan.context.holder_dust_limit_satoshis = 2001;
9064 let cached_channel_type = chan.context.channel_type.clone();
9065 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9067 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9068 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9069 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9072 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9073 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9074 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9077 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9078 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9079 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9082 // commitment tx with four outputs untrimmed (maximum feerate)
9083 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9084 chan.context.feerate_per_kw = 3702;
9085 chan.context.holder_dust_limit_satoshis = 546;
9086 chan.context.channel_type = cached_channel_type.clone();
9088 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9089 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9090 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9093 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9094 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9095 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9098 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9099 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9100 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9103 // commitment tx with three outputs untrimmed (minimum feerate)
9104 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9105 chan.context.feerate_per_kw = 3703;
9107 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9108 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9109 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9112 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9113 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9114 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9117 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9118 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9119 chan.context.feerate_per_kw = 3687;
9120 chan.context.holder_dust_limit_satoshis = 3001;
9121 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9123 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9124 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9125 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9128 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9129 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9130 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9133 // commitment tx with three outputs untrimmed (maximum feerate)
9134 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9135 chan.context.feerate_per_kw = 4914;
9136 chan.context.holder_dust_limit_satoshis = 546;
9137 chan.context.channel_type = cached_channel_type.clone();
9139 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9140 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9141 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9144 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9145 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9146 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9149 // commitment tx with two outputs untrimmed (minimum feerate)
9150 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9151 chan.context.feerate_per_kw = 4915;
9152 chan.context.holder_dust_limit_satoshis = 546;
9154 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9155 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9156 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9158 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9159 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9160 chan.context.feerate_per_kw = 4894;
9161 chan.context.holder_dust_limit_satoshis = 4001;
9162 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9164 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9165 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9166 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9168 // commitment tx with two outputs untrimmed (maximum feerate)
9169 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9170 chan.context.feerate_per_kw = 9651180;
9171 chan.context.holder_dust_limit_satoshis = 546;
9172 chan.context.channel_type = cached_channel_type.clone();
9174 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9175 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9176 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9178 // commitment tx with one output untrimmed (minimum feerate)
9179 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9180 chan.context.feerate_per_kw = 9651181;
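// Why 9651181 sat/kWU is the boundary (a back-of-the-envelope check, not code from this
// test): with no untrimmed HTLCs the non-anchors commitment weight is 724, so the fee is
// 724 * 9_651_181 / 1000 = 6_987_455 sat. The funder's balance net of its offered HTLCs
// (6_993_000 sat minus the 5_000 sat of offered HTLCs in the standard Appendix C set) is
// 6_988_000 sat, leaving 545 sat -- one below the 546 sat dust limit -- so `to_local` is
// trimmed and only `to_remote` remains. At 9651180 (the previous case) the remainder is
// exactly 546 sat and the output survives.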
9182 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9183 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9184 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9186 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9187 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9188 chan.context.feerate_per_kw = 6216010;
9189 chan.context.holder_dust_limit_satoshis = 4001;
9190 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9192 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9193 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9194 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9196 // commitment tx with fee greater than funder amount
9197 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9198 chan.context.feerate_per_kw = 9651936;
9199 chan.context.holder_dust_limit_satoshis = 546;
9200 chan.context.channel_type = cached_channel_type;
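// This vector intentionally reproduces the "one output untrimmed" transaction and
// signatures above: at 9651936 sat/kWU the nominal commitment fee exceeds what the funder
// can pay, so the fee simply consumes the funder's entire remaining balance and the
// resulting commitment is unchanged.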
9202 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9203 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9204 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9206 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9207 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9208 chan.context.feerate_per_kw = 253;
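// What this case exercises (informal note): the two offered HTLCs pushed below carry the
// same payment preimage and the same post-truncation amount (5_000_001 and 5_000_000 msat
// both round down to 5000 sat), so their commitment outputs have identical value and
// script. BIP 69 ordering cannot distinguish them, and BOLT #3 requires the tie to be
// broken by increasing `cltv_expiry` -- visible in the two HTLC-timeout transactions
// below, whose locktimes differ by one block.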
9209 chan.context.pending_inbound_htlcs.clear();
9210 chan.context.pending_inbound_htlcs.push({
9211 let mut out = InboundHTLCOutput{
9213 amount_msat: 2000000,
9215 payment_hash: PaymentHash([0; 32]),
9216 state: InboundHTLCState::Committed,
9218 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9221 chan.context.pending_outbound_htlcs.clear();
9222 chan.context.pending_outbound_htlcs.push({
9223 let mut out = OutboundHTLCOutput{
9225 amount_msat: 5000001,
9227 payment_hash: PaymentHash([0; 32]),
9228 state: OutboundHTLCState::Committed,
9229 source: HTLCSource::dummy(),
9230 skimmed_fee_msat: None,
9231 blinding_point: None,
9233 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9236 chan.context.pending_outbound_htlcs.push({
9237 let mut out = OutboundHTLCOutput{
9239 amount_msat: 5000000,
9241 payment_hash: PaymentHash([0; 32]),
9242 state: OutboundHTLCState::Committed,
9243 source: HTLCSource::dummy(),
9244 skimmed_fee_msat: None,
9245 blinding_point: None,
9247 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9251 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9252 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9253 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9256 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9257 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9258 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9260 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9261 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9262 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9264 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9265 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9266 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9269 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9270 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9271 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9272 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9275 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9276 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9277 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9279 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9280 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9281 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9283 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9284 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9285 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9290 fn test_per_commitment_secret_gen() {
9291 // Test vectors from BOLT 3 Appendix D:
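// A note on the construction, per BOLT 3: `build_commitment_secret` starts from the 32-byte
// seed and, walking the 48 index bits from most to least significant, flips the corresponding
// bit of the running value and applies SHA256 whenever that bit is set in the requested index.
// This structure is what lets a counterparty store only 49 (secret, index) pairs (see
// `CounterpartyCommitmentSecrets`) and still recover every previously revealed secret.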
9293 let mut seed = [0; 32];
9294 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9295 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9296 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9298 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9299 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9300 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9302 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9303 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9305 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9306 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9308 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9309 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9310 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9314 fn test_key_derivation() {
9315 // Test vectors from BOLT 3 Appendix E:
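// For reference, BOLT 3 defines the derivations exercised here as:
//   pubkey  = basepoint + SHA256(per_commitment_point || basepoint) * G
//   privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
// and, for revocation keys, a blend of both parties' material:
//   revocation_pubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                     + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
// so neither party alone can compute the revocation private key until the per-commitment
// secret is revealed.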
9316 let secp_ctx = Secp256k1::new();
9318 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9319 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9321 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9322 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9324 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9325 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
9327 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9328 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
9330 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9331 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9333 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9334 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9338 fn test_zero_conf_channel_type_support() {
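// Checks that constructing an `InboundV1Channel` succeeds when the counterparty's
// `open_channel` requests a `channel_type` with the zero-conf bit set; whether the channel is
// actually treated as 0conf is decided separately by the caller.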
9339 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9340 let secp_ctx = Secp256k1::new();
9341 let seed = [42; 32];
9342 let network = Network::Testnet;
9343 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9344 let logger = test_utils::TestLogger::new();
9346 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9347 let config = UserConfig::default();
9348 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9349 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9351 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9352 channel_type_features.set_zero_conf_required();
9354 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9355 open_channel_msg.channel_type = Some(channel_type_features);
9356 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9357 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9358 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9359 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9360 assert!(res.is_ok());
9364 fn test_supports_anchors_zero_htlc_tx_fee() {
9365 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9366 // resulting `channel_type`.
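// Concretely: the first `OutboundV1Channel` below is built against a counterparty that does
// not advertise anchor support, so it falls back to `static_remote_key` alone, while the
// second (where both sides advertise it) negotiates the anchors type.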
9367 let secp_ctx = Secp256k1::new();
9368 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9369 let network = Network::Testnet;
9370 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9371 let logger = test_utils::TestLogger::new();
9373 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9374 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9376 let mut config = UserConfig::default();
9377 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9379 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
9380 // both sides need to signal it.
9381 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9382 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9383 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9384 &config, 0, 42, None
9386 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9388 let mut expected_channel_type = ChannelTypeFeatures::empty();
9389 expected_channel_type.set_static_remote_key_required();
9390 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9392 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9393 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9394 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9398 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9399 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9400 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9401 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9402 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9405 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9406 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9410 fn test_rejects_implicit_simple_anchors() {
9411 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9412 // each side's `InitFeatures`, it is rejected.
9413 let secp_ctx = Secp256k1::new();
9414 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9415 let network = Network::Testnet;
9416 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9417 let logger = test_utils::TestLogger::new();
9419 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9420 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9422 let config = UserConfig::default();
9424 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9425 let static_remote_key_required: u64 = 1 << 12;
9426 let simple_anchors_required: u64 = 1 << 20;
9427 let raw_init_features = static_remote_key_required | simple_anchors_required;
9428 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
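// Per BOLT 9, bit 12 is `option_static_remotekey` (required) and bit 20 is the legacy
// `option_anchors` (required), i.e. anchors without zero-fee HTLC transactions, a variant
// LDK does not support.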
9430 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9431 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9432 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9436 // Set `channel_type` to `None` to force the implicit feature negotiation.
9437 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9438 open_channel_msg.channel_type = None;
9440 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9441 // `static_remote_key`, it will fail the channel.
9442 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9443 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9444 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9445 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9447 assert!(channel_b.is_err());
9451 fn test_rejects_simple_anchors_channel_type() {
9452 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it is rejected.
9454 let secp_ctx = Secp256k1::new();
9455 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9456 let network = Network::Testnet;
9457 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9458 let logger = test_utils::TestLogger::new();
9460 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9461 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9463 let config = UserConfig::default();
9465 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9466 let static_remote_key_required: u64 = 1 << 12;
9467 let simple_anchors_required: u64 = 1 << 20;
9468 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9469 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9470 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9471 assert!(!simple_anchors_init.requires_unknown_bits());
9472 assert!(!simple_anchors_channel_type.requires_unknown_bits());
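// Sanity check: LDK recognizes these feature bits, so the failures asserted below are
// deliberate rejections of the legacy anchors type rather than unknown-feature handling.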
9474 // First, we'll try to open a channel between A and B where A requests a channel type for
9475 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9476 // B as it's not supported by LDK.
9477 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9478 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9479 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9483 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9484 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9486 let res = InboundV1Channel::<&TestKeysInterface>::new(
9487 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9488 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9489 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9491 assert!(res.is_err());
9493 // Then, we'll try to open another channel where A requests a channel type for
9494 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9495 // original `option_anchors` feature, which should be rejected by A as it's not supported by LDK.
9497 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9498 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9499 10000000, 100000, 42, &config, 0, 42, None
9502 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9504 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9505 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9506 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9507 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9510 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9511 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9513 let res = channel_a.accept_channel(
9514 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9516 assert!(res.is_err());
9520 fn test_waiting_for_batch() {
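// Exercises batch funding: while `ChannelState::WaitingForBatch` is set, neither
// `channel_ready` nor the funding transaction may be released, even with
// `trust_own_funding_0conf`, until `set_batch_ready()` clears the flag.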
9521 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9522 let logger = test_utils::TestLogger::new();
9523 let secp_ctx = Secp256k1::new();
9524 let seed = [42; 32];
9525 let network = Network::Testnet;
9526 let best_block = BestBlock::from_network(network);
9527 let chain_hash = ChainHash::using_genesis_block(network);
9528 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9530 let mut config = UserConfig::default();
9531 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9532 // channel in a batch before all channels are ready.
9533 config.channel_handshake_limits.trust_own_funding_0conf = true;
9535 // Create a channel from node a to node b that will be part of batch funding.
9536 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9537 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9542 &channelmanager::provided_init_features(&config),
9552 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9553 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9554 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9559 &channelmanager::provided_channel_type_features(&config),
9560 &channelmanager::provided_init_features(&config),
9566 true, // Allow node b to send a 0conf channel_ready.
9569 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9570 node_a_chan.accept_channel(
9571 &accept_channel_msg,
9572 &config.channel_handshake_limits,
9573 &channelmanager::provided_init_features(&config),
9576 // Fund the channel with a batch funding transaction.
9577 let output_script = node_a_chan.context.get_funding_redeemscript();
9578 let tx = Transaction {
9580 lock_time: LockTime::ZERO,
9584 value: 10000000, script_pubkey: output_script.clone(),
9587 value: 10000000, script_pubkey: Builder::new().into_script(),
9590 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9591 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9596 ).map_err(|_| ()).unwrap();
9597 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9598 &funding_created_msg.unwrap(),
9602 ).map_err(|_| ()).unwrap();
9603 let node_b_updates = node_b_chan.monitor_updating_restored(
9611 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9612 // broadcasting the funding transaction until the batch is ready.
9613 let _ = node_a_chan.funding_signed(
9614 &funding_signed_msg.unwrap(),
9619 let node_a_updates = node_a_chan.monitor_updating_restored(
9626 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9627 // as the funding transaction depends on all channels in the batch becoming ready.
9628 assert!(node_a_updates.channel_ready.is_none());
9629 assert!(node_a_updates.funding_broadcastable.is_none());
9631 node_a_chan.context.channel_state,
9632 ChannelState::FundingSent as u32 |
9633 ChannelState::WaitingForBatch as u32,
9636 // It is possible to receive a 0conf channel_ready from the remote node.
9637 node_a_chan.channel_ready(
9638 &node_b_updates.channel_ready.unwrap(),
9646 node_a_chan.context.channel_state,
9647 ChannelState::FundingSent as u32 |
9648 ChannelState::WaitingForBatch as u32 |
9649 ChannelState::TheirChannelReady as u32,
9652 // ChannelState::WaitingForBatch is only cleared once the ChannelManager calls set_batch_ready().
9653 node_a_chan.set_batch_ready();
9655 node_a_chan.context.channel_state,
9656 ChannelState::FundingSent as u32 |
9657 ChannelState::TheirChannelReady as u32,
9659 assert!(node_a_chan.check_get_channel_ready(0).is_some());