1 // This file is Copyright its original authors, visible in version control
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these licenses.
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
62 pub struct ChannelValueStat {
63 pub value_to_self_msat: u64,
64 pub channel_value_msat: u64,
65 pub channel_reserve_msat: u64,
66 pub pending_outbound_htlcs_amount_msat: u64,
67 pub pending_inbound_htlcs_amount_msat: u64,
68 pub holding_cell_outbound_amount_msat: u64,
69 pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70 pub counterparty_dust_limit_msat: u64,
73 pub struct AvailableBalances {
74 /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75 pub balance_msat: u64,
76 /// Total amount available for our counterparty to send to us.
77 pub inbound_capacity_msat: u64,
78 /// Total amount available for us to send to our counterparty.
79 pub outbound_capacity_msat: u64,
80 /// The maximum value we can assign to the next outbound HTLC
81 pub next_outbound_htlc_limit_msat: u64,
82 /// The minimum value we can assign to the next outbound HTLC
83 pub next_outbound_htlc_minimum_msat: u64,
86 #[derive(Debug, Clone, Copy, PartialEq)]
88 // Inbound states mirroring InboundHTLCState
90 AwaitingRemoteRevokeToAnnounce,
91 // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92 // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93 // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94 // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95 // instead of setting it to AwaitingAnnouncedRemoteRevoke.
97 // Outbound state can only be `LocalAnnounced` or `Committed`
101 enum InboundHTLCRemovalReason {
102 FailRelay(msgs::OnionErrorPacket),
103 FailMalformed(([u8; 32], u16)),
104 Fulfill(PaymentPreimage),
107 enum InboundHTLCState {
108 /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109 /// update_add_htlc message for this HTLC.
110 RemoteAnnounced(PendingHTLCStatus),
111 /// Included in a received commitment_signed message (implying we've
112 /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113 /// state (see the example below). We have not yet included this HTLC in a
114 /// commitment_signed message because we are waiting on the remote's
115 /// aforementioned state revocation. One reason this missing remote RAA
116 /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117 /// is because every time we create a new "state", i.e. every time we sign a
118 /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119 /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120 /// sent provided the per_commitment_point for our current commitment tx.
121 /// The other reason we should not send a commitment_signed without their RAA
122 /// is because their RAA serves to ACK our previous commitment_signed.
124 /// Here's an example of how an HTLC could come to be in this state:
125 /// remote --> update_add_htlc(prev_htlc) --> local
126 /// remote --> commitment_signed(prev_htlc) --> local
127 /// remote <-- revoke_and_ack <-- local
128 /// remote <-- commitment_signed(prev_htlc) <-- local
129 /// [note that here, the remote does not respond with a RAA]
130 /// remote --> update_add_htlc(this_htlc) --> local
131 /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132 /// Now `this_htlc` will be assigned this state. It's unable to be officially
133 /// accepted, i.e. included in a commitment_signed, because we're missing the
134 /// RAA that provides our next per_commitment_point. The per_commitment_point
135 /// is used to derive commitment keys, which are used to construct the
136 /// signatures in a commitment_signed message.
137 /// Implies AwaitingRemoteRevoke.
139 /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140 AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141 /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142 /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143 /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144 /// channel (before it can then get forwarded and/or removed).
145 /// Implies AwaitingRemoteRevoke.
146 AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
148 /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
151 /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152 /// commitment transaction without it as otherwise we'll have to force-close the channel to
153 /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154 /// anyway). That said, ChannelMonitor does this for us (see
155 /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156 /// our own local state before then, once we're sure that the next commitment_signed and
157 /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158 LocalRemoved(InboundHTLCRemovalReason),
161 struct InboundHTLCOutput {
165 payment_hash: PaymentHash,
166 state: InboundHTLCState,
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171 /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172 /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173 /// we will promote to Committed (note that they may not accept it until the next time we
174 /// revoke, but we don't really care about that:
175 /// * they've revoked, so worst case we can announce an old state and get our (option on)
176 /// money back (though we won't), and,
177 /// * we'll send them a revoke when they send a commitment_signed, and since only they're
178 /// allowed to remove it, the "can only be removed once committed on both sides" requirement
179 /// doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180 /// we'll never get out of sync).
181 /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182 /// OutboundHTLCOutput's size just for a temporary bit
183 LocalAnnounced(Box<msgs::OnionPacket>),
185 /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186 /// the change (though they'll need to revoke before we fail the payment).
187 RemoteRemoved(OutboundHTLCOutcome),
188 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191 /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192 /// remote revoke_and_ack on a previous state before we can do so.
193 AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194 /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195 /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196 /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197 /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198 /// revoke_and_ack to drop completely.
199 AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205 /// LDK version 0.0.105+ will always fill in the preimage here.
206 Success(Option<PaymentPreimage>),
207 Failure(HTLCFailReason),
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211 fn from(o: Option<HTLCFailReason>) -> Self {
213 None => OutboundHTLCOutcome::Success(None),
214 Some(r) => OutboundHTLCOutcome::Failure(r)
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220 fn into(self) -> Option<&'a HTLCFailReason> {
222 OutboundHTLCOutcome::Success(_) => None,
223 OutboundHTLCOutcome::Failure(ref r) => Some(r)
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
233 payment_hash: PaymentHash,
234 state: OutboundHTLCState,
236 blinding_point: Option<PublicKey>,
237 skimmed_fee_msat: Option<u64>,
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243 AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
247 payment_hash: PaymentHash,
249 onion_routing_packet: msgs::OnionPacket,
250 // The extra fee we're skimming off the top of this HTLC.
251 skimmed_fee_msat: Option<u64>,
252 blinding_point: Option<PublicKey>,
255 payment_preimage: PaymentPreimage,
260 err_packet: msgs::OnionErrorPacket,
264 /// There are a few "states" and then a number of flags which can be applied:
265 /// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
266 /// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
267 /// move on to `ChannelReady`.
268 /// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
269 /// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
270 /// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
272 /// Implies we have (or are prepared to) send our open_channel/accept_channel message
273 OurInitSent = 1 << 0,
274 /// Implies we have received their `open_channel`/`accept_channel` message
275 TheirInitSent = 1 << 1,
276 /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
277 /// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
278 /// upon receipt of `funding_created`, so simply skip this state.
280 /// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
281 /// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
282 /// and our counterparty consider the funding transaction confirmed.
284 /// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
285 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
286 TheirChannelReady = 1 << 4,
287 /// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
288 /// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
289 OurChannelReady = 1 << 5,
291 /// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
292 /// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
294 PeerDisconnected = 1 << 7,
295 /// Flag which is set on `ChannelReady`, FundingCreated, and `FundingSent` indicating the user has
296 /// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
297 /// sending any outbound messages until they've managed to finish.
298 MonitorUpdateInProgress = 1 << 8,
299 /// Flag which implies that we have sent a commitment_signed but are awaiting the responding
300 /// revoke_and_ack message. During this time period, we can't generate new commitment_signed
301 /// messages as then we will be unable to determine which HTLCs they included in their
302 /// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
304 /// Flag is set on `ChannelReady`.
305 AwaitingRemoteRevoke = 1 << 9,
306 /// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
307 /// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
308 /// to respond with our own shutdown message when possible.
309 RemoteShutdownSent = 1 << 10,
310 /// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
311 /// point, we may not add any new HTLCs to the channel.
312 LocalShutdownSent = 1 << 11,
313 /// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
314 /// to drop us, but we store this anyway.
315 ShutdownComplete = 4096,
316 /// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
317 /// broadcasting of the funding transaction is being held until all channels in the batch
318 /// have received funding_signed and have their monitors persisted.
319 WaitingForBatch = 1 << 13,
/// Mask covering the two shutdown-sent flags: both `LocalShutdownSent` and
/// `RemoteShutdownSent`, i.e. both we and our counterparty have sent a `shutdown` message.
321 const BOTH_SIDES_SHUTDOWN_MASK: u32 =
322 	ChannelState::LocalShutdownSent as u32 |
323 	ChannelState::RemoteShutdownSent as u32;
/// Mask of the flags which may be set on a channel in addition to its primary state: the two
/// shutdown-sent flags plus `PeerDisconnected` and `MonitorUpdateInProgress`.
324 const MULTI_STATE_FLAGS: u32 =
325 	BOTH_SIDES_SHUTDOWN_MASK |
326 	ChannelState::PeerDisconnected as u32 |
327 	ChannelState::MonitorUpdateInProgress as u32;
328 const STATE_FLAGS: u32 =
330 ChannelState::TheirChannelReady as u32 |
331 ChannelState::OurChannelReady as u32 |
332 ChannelState::AwaitingRemoteRevoke as u32 |
333 ChannelState::WaitingForBatch as u32;
/// The commitment number assigned to a channel's very first commitment transaction; commitment
/// numbers count downwards from here. This is the largest value representable in 48 bits.
pub const INITIAL_COMMITMENT_NUMBER: u64 = 0xFFFF_FFFF_FFFF; // == (1 << 48) - 1
/// The default cap on the number of HTLCs pending with a counterparty at once.
/// NOTE(review): presumably applied as our `max_accepted_htlcs` — confirm at the call sites.
337 pub const DEFAULT_MAX_HTLCS: u16 = 50;
339 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
340 const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
341 const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
342 if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
346 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
348 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
/// The amount, in satoshis, of each anchor output on a commitment transaction when anchor
/// outputs are in use. NOTE(review): 330 sat matches the spec value for `option_anchors`
/// commitment outputs — confirm against BOLT #3.
350 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
352 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
353 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
354 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
355 /// `holder_max_htlc_value_in_flight_msat`.
356 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
358 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
359 /// `option_support_large_channel` (aka wumbo channels) is not supported.
361 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
363 /// Total bitcoin supply in satoshis.
364 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
366 /// The maximum network dust limit for standard script formats. This currently represents the
367 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
368 /// transaction non-standard and thus refuses to relay it.
369 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
370 /// implementations use this value for their dust limit today.
371 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
373 /// The maximum channel dust limit we will accept from our counterparty.
374 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
376 /// The dust limit is used for both the commitment transaction outputs as well as the closing
377 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
378 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
379 /// In order to avoid having to concern ourselves with standardness during the closing process, we
380 /// simply require our counterparty to use a dust limit which will leave any segwit output
382 /// See <https://github.com/lightning/bolts/issues/905> for more details.
383 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
385 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
386 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
388 /// Used to return a simple Error back to ChannelManager. Will get converted to a
389 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
390 /// channel_id in ChannelManager.
391 pub(super) enum ChannelError {
397 impl fmt::Debug for ChannelError {
398 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
400 &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
401 &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
402 &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
407 impl fmt::Display for ChannelError {
408 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
410 &ChannelError::Ignore(ref e) => write!(f, "{}", e),
411 &ChannelError::Warn(ref e) => write!(f, "{}", e),
412 &ChannelError::Close(ref e) => write!(f, "{}", e),
417 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
419 pub peer_id: Option<PublicKey>,
420 pub channel_id: Option<ChannelId>,
423 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
424 fn log(&self, mut record: Record) {
425 record.peer_id = self.peer_id;
426 record.channel_id = self.channel_id;
427 self.logger.log(record)
431 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
432 where L::Target: Logger {
433 pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
434 where S::Target: SignerProvider
438 peer_id: Some(context.counterparty_node_id),
439 channel_id: Some(context.channel_id),
444 macro_rules! secp_check {
445 ($res: expr, $err: expr) => {
448 Err(_) => return Err(ChannelError::Close($err)),
453 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
454 /// our counterparty or not. However, we don't want to announce updates right away to avoid
455 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
456 /// our channel_update message and track the current state here.
457 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
458 #[derive(Clone, Copy, PartialEq)]
459 pub(super) enum ChannelUpdateStatus {
460 /// We've announced the channel as enabled and are connected to our peer.
462 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
464 /// Our channel is live again, but we haven't announced the channel as enabled yet.
466 /// We've announced the channel as disabled.
470 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
472 pub enum AnnouncementSigsState {
473 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
474 /// we sent the last `AnnouncementSignatures`.
476 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
477 /// This state never appears on disk - instead we write `NotSent`.
479 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
480 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
481 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
482 /// they send back a `RevokeAndACK`.
483 /// This state never appears on disk - instead we write `NotSent`.
485 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
486 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
490 /// An enum indicating whether the local or remote side offered a given HTLC.
496 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
499 pending_htlcs_value_msat: u64,
500 on_counterparty_tx_dust_exposure_msat: u64,
501 on_holder_tx_dust_exposure_msat: u64,
502 holding_cell_msat: u64,
503 on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
506 /// An enum gathering stats on commitment transaction, either local or remote.
507 struct CommitmentStats<'a> {
508 tx: CommitmentTransaction, // the transaction info
509 feerate_per_kw: u32, // the feerate included to build the transaction
510 total_fee_sat: u64, // the total fee included in the transaction
511 num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
512 htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
513 local_balance_msat: u64, // local balance before fees but considering dust limits
514 remote_balance_msat: u64, // remote balance before fees but considering dust limits
515 outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
516 inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
519 /// Used when calculating whether we or the remote can afford an additional HTLC.
520 struct HTLCCandidate {
522 origin: HTLCInitiator,
526 fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
534 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
536 enum UpdateFulfillFetch {
538 monitor_update: ChannelMonitorUpdate,
539 htlc_value_msat: u64,
540 msg: Option<msgs::UpdateFulfillHTLC>,
545 /// The return type of get_update_fulfill_htlc_and_commit.
546 pub enum UpdateFulfillCommitFetch {
547 /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
548 /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
549 /// previously placed in the holding cell (and has since been removed).
551 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
552 monitor_update: ChannelMonitorUpdate,
553 /// The value of the HTLC which was claimed, in msat.
554 htlc_value_msat: u64,
556 /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
557 /// or has been forgotten (presumably previously claimed).
561 /// The return value of `monitor_updating_restored`
562 pub(super) struct MonitorRestoreUpdates {
563 pub raa: Option<msgs::RevokeAndACK>,
564 pub commitment_update: Option<msgs::CommitmentUpdate>,
565 pub order: RAACommitmentOrder,
566 pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
567 pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
568 pub finalized_claimed_htlcs: Vec<HTLCSource>,
569 pub funding_broadcastable: Option<Transaction>,
570 pub channel_ready: Option<msgs::ChannelReady>,
571 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
574 /// The return value of `signer_maybe_unblocked`
576 pub(super) struct SignerResumeUpdates {
577 pub commitment_update: Option<msgs::CommitmentUpdate>,
578 pub funding_signed: Option<msgs::FundingSigned>,
579 pub funding_created: Option<msgs::FundingCreated>,
580 pub channel_ready: Option<msgs::ChannelReady>,
583 /// The return value of `channel_reestablish`
584 pub(super) struct ReestablishResponses {
585 pub channel_ready: Option<msgs::ChannelReady>,
586 pub raa: Option<msgs::RevokeAndACK>,
587 pub commitment_update: Option<msgs::CommitmentUpdate>,
588 pub order: RAACommitmentOrder,
589 pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
590 pub shutdown_msg: Option<msgs::Shutdown>,
593 /// The result of a shutdown that should be handled.
595 pub(crate) struct ShutdownResult {
596 /// A channel monitor update to apply.
597 pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
598 /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
599 pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
600 /// An unbroadcasted batch funding transaction id. The closure of this channel should be
601 /// propagated to the remainder of the batch.
602 pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
603 pub(crate) channel_id: ChannelId,
604 pub(crate) counterparty_node_id: PublicKey,
607 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
608 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
609 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
610 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
611 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
612 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
613 /// by this multiple without hitting this case, before sending.
614 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
615 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
616 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
617 /// leave the channel less usable as we hold a bigger reserve.
// The constant is exposed as `pub` under test/fuzzing builds so test code can reference it;
// the value is identical in both configurations.
618 #[cfg(any(fuzzing, test))]
619 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
620 #[cfg(not(any(fuzzing, test)))]
621 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
623 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
624 /// channel creation on an inbound channel, we simply force-close and move on.
625 /// This constant is the one suggested in BOLT 2.
626 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
628 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
629 /// not have enough balance value remaining to cover the onchain cost of this new
630 /// HTLC weight. If this happens, our counterparty fails the reception of our
631 /// commitment_signed including this new HTLC due to infringement on the channel
633 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
634 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
635 /// leads to a channel force-close. Ultimately, this is an issue coming from the
636 /// design of LN state machines, allowing asynchronous updates.
637 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
639 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
640 /// commitment transaction fees, with at least this many HTLCs present on the commitment
641 /// transaction (not counting the value of the HTLCs themselves).
642 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
644 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
645 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
646 /// ChannelUpdate prompted by the config update. This value was determined as follows:
648 /// * The expected interval between ticks (1 minute).
649 /// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
650 /// for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
651 /// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
652 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
654 /// The number of ticks that may elapse while we're waiting for a response to a
655 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
658 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
659 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
661 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
662 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
663 /// exceeding this age limit will be force-closed and purged from memory.
664 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
666 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
667 pub(crate) const COINBASE_MATURITY: u32 = 100;
669 struct PendingChannelMonitorUpdate {
670 update: ChannelMonitorUpdate,
673 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
674 (0, update, required),
677 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
678 /// its variants containing an appropriate channel struct.
679 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
680 UnfundedOutboundV1(OutboundV1Channel<SP>),
681 UnfundedInboundV1(InboundV1Channel<SP>),
685 impl<'a, SP: Deref> ChannelPhase<SP> where
686 SP::Target: SignerProvider,
687 <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
689 pub fn context(&'a self) -> &'a ChannelContext<SP> {
691 ChannelPhase::Funded(chan) => &chan.context,
692 ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
693 ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
697 pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
699 ChannelPhase::Funded(ref mut chan) => &mut chan.context,
700 ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
701 ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
706 /// Contains all state common to unfunded inbound/outbound channels.
707 pub(super) struct UnfundedChannelContext {
708 /// A counter tracking how many ticks have elapsed since this unfunded channel was
709 /// created. If this counter reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` before the channel has
710 /// been funded, the channel will be force-closed and purged from memory.
712 /// This is so that we don't keep channels around that haven't progressed to a funded state
713 /// in a timely manner.
714 unfunded_channel_age_ticks: usize,
717 impl UnfundedChannelContext {
718 /// Determines whether we should force-close and purge this unfunded channel from memory due to it
719 /// having reached the unfunded channel age limit.
721 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
722 pub fn should_expire_unfunded_channel(&mut self) -> bool {
723 self.unfunded_channel_age_ticks += 1;
724 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
728 /// Contains everything about the channel including state, and various flags.
729 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
730 config: LegacyChannelConfig,
732 // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
733 // constructed using it. The second element in the tuple corresponds to the number of ticks that
734 // have elapsed since the update occurred.
735 prev_config: Option<(ChannelConfig, usize)>,
737 inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
741 /// The current channel ID.
742 channel_id: ChannelId,
743 /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
744 /// Will be `None` for channels created prior to 0.0.115.
745 temporary_channel_id: Option<ChannelId>,
748 // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
749 // our peer. However, we want to make sure they received it, or else rebroadcast it when we reconnect.
751 // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
752 // Note that a number of our tests were written prior to the behavior here which retransmits
753 // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
755 #[cfg(any(test, feature = "_test_utils"))]
756 pub(crate) announcement_sigs_state: AnnouncementSigsState,
757 #[cfg(not(any(test, feature = "_test_utils")))]
758 announcement_sigs_state: AnnouncementSigsState,
760 secp_ctx: Secp256k1<secp256k1::All>,
761 channel_value_satoshis: u64,
763 latest_monitor_update_id: u64,
765 holder_signer: ChannelSignerType<SP>,
766 shutdown_scriptpubkey: Option<ShutdownScript>,
767 destination_script: ScriptBuf,
769 // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
770 // generation start at 0 and count up...this simplifies some parts of implementation at the
771 // cost of others, but should really just be changed.
773 cur_holder_commitment_transaction_number: u64,
774 cur_counterparty_commitment_transaction_number: u64,
775 value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
776 pending_inbound_htlcs: Vec<InboundHTLCOutput>,
777 pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
778 holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
780 /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
781 /// need to ensure we resend them in the order we originally generated them. Note that because
782 /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
783 /// sufficient to simply set this to the opposite of any message we are generating as we
784 /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
785 /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
786 /// resend it first.
787 resend_order: RAACommitmentOrder,
789 monitor_pending_channel_ready: bool,
790 monitor_pending_revoke_and_ack: bool,
791 monitor_pending_commitment_signed: bool,
793 // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
794 // responsible for some of the HTLCs here or not - we don't know whether the update in question
795 // completed or not. We currently ignore these fields entirely when force-closing a channel,
796 // but need to handle this somehow or we run the risk of losing HTLCs!
797 monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
798 monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
799 monitor_pending_finalized_fulfills: Vec<HTLCSource>,
801 /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
802 /// but our signer (initially) refused to give us a signature, we should retry at some point in
803 /// the future when the signer indicates it may have a signature for us.
805 /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
806 /// setting it again as a side-effect of [`Channel::channel_reestablish`].
807 signer_pending_commitment_update: bool,
808 /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
809 /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
810 /// outbound or inbound.
811 signer_pending_funding: bool,
813 // pending_update_fee is filled when sending and receiving update_fee.
815 // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
816 // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
817 // generating new commitment transactions with exactly the same criteria as inbound/outbound
818 // HTLCs with similar state.
819 pending_update_fee: Option<(u32, FeeUpdateState)>,
820 // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
821 // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
822 // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
823 // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
824 // further `send_update_fee` calls, dropping the previous holding cell update entirely.
825 holding_cell_update_fee: Option<u32>,
826 next_holder_htlc_id: u64,
827 next_counterparty_htlc_id: u64,
830 /// The timestamp set on our latest `channel_update` message for this channel. It is updated
831 /// when the channel is updated in ways which may impact the `channel_update` message or when a
832 /// new block is received, ensuring it's always at least moderately close to the current real
834 update_time_counter: u32,
836 #[cfg(debug_assertions)]
837 /// Max to_local and to_remote outputs in a locally-generated commitment transaction
838 holder_max_commitment_tx_output: Mutex<(u64, u64)>,
839 #[cfg(debug_assertions)]
840 /// Max to_local and to_remote outputs in a remote-generated commitment transaction
841 counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
843 last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
844 target_closing_feerate_sats_per_kw: Option<u32>,
846 /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
847 /// update, we need to delay processing it until later. We do that here by simply storing the
848 /// closing_signed message and handling it in `maybe_propose_closing_signed`.
849 pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
851 /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
852 /// transaction. These are set once we reach `closing_negotiation_ready`.
854 pub(crate) closing_fee_limits: Option<(u64, u64)>,
856 closing_fee_limits: Option<(u64, u64)>,
858 /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
859 /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
860 /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
861 /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
862 /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
864 /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
865 /// until we see a `commitment_signed` before doing so.
867 /// We don't bother to persist this - we anticipate this state won't last longer than a few
868 /// milliseconds, so any accidental force-closes here should be exceedingly rare.
869 expecting_peer_commitment_signed: bool,
871 /// The hash of the block in which the funding transaction was included.
872 funding_tx_confirmed_in: Option<BlockHash>,
873 funding_tx_confirmation_height: u32,
874 short_channel_id: Option<u64>,
875 /// Either the height at which this channel was created or the height at which it was last
876 /// serialized if it was serialized by versions prior to 0.0.103.
877 /// We use this to close if funding is never broadcasted.
878 channel_creation_height: u32,
880 counterparty_dust_limit_satoshis: u64,
883 pub(super) holder_dust_limit_satoshis: u64,
885 holder_dust_limit_satoshis: u64,
888 pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
890 counterparty_max_htlc_value_in_flight_msat: u64,
893 pub(super) holder_max_htlc_value_in_flight_msat: u64,
895 holder_max_htlc_value_in_flight_msat: u64,
897 /// minimum channel reserve for self to maintain - set by them.
898 counterparty_selected_channel_reserve_satoshis: Option<u64>,
901 pub(super) holder_selected_channel_reserve_satoshis: u64,
903 holder_selected_channel_reserve_satoshis: u64,
905 counterparty_htlc_minimum_msat: u64,
906 holder_htlc_minimum_msat: u64,
908 pub counterparty_max_accepted_htlcs: u16,
910 counterparty_max_accepted_htlcs: u16,
911 holder_max_accepted_htlcs: u16,
912 minimum_depth: Option<u32>,
914 counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
916 pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
917 funding_transaction: Option<Transaction>,
918 is_batch_funding: Option<()>,
920 counterparty_cur_commitment_point: Option<PublicKey>,
921 counterparty_prev_commitment_point: Option<PublicKey>,
922 counterparty_node_id: PublicKey,
924 counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
926 commitment_secrets: CounterpartyCommitmentSecrets,
928 channel_update_status: ChannelUpdateStatus,
929 /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
930 /// not complete within a single timer tick (one minute), we should force-close the channel.
931 /// This prevents us from keeping unusable channels around forever if our counterparty wishes
/// to stall the closing negotiation indefinitely.
933 /// Note that this field is reset to false on deserialization to give us a chance to connect to
934 /// our peer and start the closing_signed negotiation fresh.
935 closing_signed_in_flight: bool,
937 /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
938 /// This can be used to rebroadcast the channel_announcement message later.
939 announcement_sigs: Option<(Signature, Signature)>,
941 // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
942 // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
943 // be, by comparing the cached values to the fee of the transaction generated by
944 // `build_commitment_transaction`.
945 #[cfg(any(test, fuzzing))]
946 next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
947 #[cfg(any(test, fuzzing))]
948 next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
950 /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
951 /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
952 /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
953 /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
954 /// message until we receive a channel_reestablish.
956 /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
957 pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
959 /// An option set when we wish to track how many ticks have elapsed while waiting for a response
960 /// from our counterparty after sending a message. If the peer has yet to respond after reaching
961 /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
962 /// unblock the state machine.
964 /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
965 /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
966 /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
968 /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
969 /// [`msgs::RevokeAndACK`] message from the counterparty.
970 sent_message_awaiting_response: Option<usize>,
972 #[cfg(any(test, fuzzing))]
973 // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
974 // corresponding HTLC on the inbound path. If, then, the outbound path channel is
975 // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
976 // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
977 // is fine, but as a sanity check in our failure to generate the second claim, we check here
978 // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
979 historical_inbound_htlc_fulfills: HashSet<u64>,
981 /// This channel's type, as negotiated during channel open
982 channel_type: ChannelTypeFeatures,
984 // Our counterparty can offer us SCID aliases which they will map to this channel when routing
985 // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
986 // the channel's funding UTXO.
988 // We also use this when sending our peer a channel_update that isn't to be broadcasted
989 // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
990 // associated channel mapping.
992 // We only bother storing the most recent SCID alias at any time, though our counterparty has
993 // to store all of them.
994 latest_inbound_scid_alias: Option<u64>,
996 // We always offer our counterparty a static SCID alias, which we recognize as for this channel
997 // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
998 // don't currently support node id aliases and eventually privacy should be provided with
999 // blinded paths instead of simple scid+node_id aliases.
1000 outbound_scid_alias: u64,
1002 // We track whether we already emitted a `ChannelPending` event.
1003 channel_pending_event_emitted: bool,
1005 // We track whether we already emitted a `ChannelReady` event.
1006 channel_ready_event_emitted: bool,
1008 /// The unique identifier used to re-derive the private key material for the channel through
1009 /// [`SignerProvider::derive_channel_signer`].
1010 channel_keys_id: [u8; 32],
1012 /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1013 /// store it here and only release it to the `ChannelManager` once it asks for it.
1014 blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1017 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1018 /// Allowed in any state (including after shutdown)
1019 pub fn get_update_time_counter(&self) -> u32 {
1020 self.update_time_counter
1023 pub fn get_latest_monitor_update_id(&self) -> u64 {
1024 self.latest_monitor_update_id
1027 pub fn should_announce(&self) -> bool {
1028 self.config.announced_channel
1031 pub fn is_outbound(&self) -> bool {
1032 self.channel_transaction_parameters.is_outbound_from_holder
1035 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1036 /// Allowed in any state (including after shutdown)
1037 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1038 self.config.options.forwarding_fee_base_msat
1041 /// Returns true if we've ever received a message from the remote end for this Channel
1042 pub fn have_received_message(&self) -> bool {
1043 self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
1046 /// Returns true if this channel is fully established and not known to be closing.
1047 /// Allowed in any state (including after shutdown)
1048 pub fn is_usable(&self) -> bool {
1049 let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
1050 (self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
1053 /// shutdown state returns the state of the channel in its various stages of shutdown
1054 pub fn shutdown_state(&self) -> ChannelShutdownState {
1055 if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
1056 return ChannelShutdownState::ShutdownComplete;
1058 if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
1059 return ChannelShutdownState::ShutdownInitiated;
1061 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
1062 return ChannelShutdownState::ResolvingHTLCs;
1064 if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
1065 return ChannelShutdownState::NegotiatingClosingFee;
1067 return ChannelShutdownState::NotShuttingDown;
1070 fn closing_negotiation_ready(&self) -> bool {
1071 self.pending_inbound_htlcs.is_empty() &&
1072 self.pending_outbound_htlcs.is_empty() &&
1073 self.pending_update_fee.is_none() &&
1074 self.channel_state &
1075 (BOTH_SIDES_SHUTDOWN_MASK |
1076 ChannelState::AwaitingRemoteRevoke as u32 |
1077 ChannelState::PeerDisconnected as u32 |
1078 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
1081 /// Returns true if this channel is currently available for use. This is a superset of
1082 /// is_usable() and considers things like the channel being temporarily disabled.
1083 /// Allowed in any state (including after shutdown)
1084 pub fn is_live(&self) -> bool {
1085 self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
1088 // Public utilities:
1090 pub fn channel_id(&self) -> ChannelId {
1094 // Return the `temporary_channel_id` used during channel establishment.
1096 // Will return `None` for channels created prior to LDK version 0.0.115.
1097 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1098 self.temporary_channel_id
1101 pub fn minimum_depth(&self) -> Option<u32> {
1105 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1106 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1107 pub fn get_user_id(&self) -> u128 {
1111 /// Gets the channel's type
1112 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1116 /// Gets the channel's `short_channel_id`.
1118 /// Will return `None` if the channel hasn't been confirmed yet.
1119 pub fn get_short_channel_id(&self) -> Option<u64> {
1120 self.short_channel_id
1123 /// Allowed in any state (including after shutdown)
1124 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1125 self.latest_inbound_scid_alias
1128 /// Allowed in any state (including after shutdown)
1129 pub fn outbound_scid_alias(&self) -> u64 {
1130 self.outbound_scid_alias
1133 /// Returns the holder signer for this channel.
1135 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1136 return &self.holder_signer
1139 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1140 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1141 /// or prior to any channel actions during `Channel` initialization.
1142 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1143 debug_assert_eq!(self.outbound_scid_alias, 0);
1144 self.outbound_scid_alias = outbound_scid_alias;
1147 /// Returns the funding_txo we either got from our peer, or were given by
1148 /// get_funding_created.
1149 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1150 self.channel_transaction_parameters.funding_outpoint
1153 /// Returns the height in which our funding transaction was confirmed.
1154 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1155 let conf_height = self.funding_tx_confirmation_height;
1156 if conf_height > 0 {
1163 /// Returns the block hash in which our funding transaction was confirmed.
1164 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1165 self.funding_tx_confirmed_in
1168 /// Returns the current number of confirmations on the funding transaction.
1169 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1170 if self.funding_tx_confirmation_height == 0 {
1171 // We either haven't seen any confirmation yet, or observed a reorg.
1175 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1178 fn get_holder_selected_contest_delay(&self) -> u16 {
1179 self.channel_transaction_parameters.holder_selected_contest_delay
1182 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1183 &self.channel_transaction_parameters.holder_pubkeys
1186 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1187 self.channel_transaction_parameters.counterparty_parameters
1188 .as_ref().map(|params| params.selected_contest_delay)
1191 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1192 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1195 /// Allowed in any state (including after shutdown)
1196 pub fn get_counterparty_node_id(&self) -> PublicKey {
1197 self.counterparty_node_id
1200 /// Allowed in any state (including after shutdown)
1201 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1202 self.holder_htlc_minimum_msat
1205 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1206 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1207 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1210 /// Allowed in any state (including after shutdown)
1211 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1213 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1214 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1215 // channel might have been used to route very small values (either by honest users or as DoS).
1216 self.channel_value_satoshis * 1000 * 9 / 10,
1218 self.counterparty_max_htlc_value_in_flight_msat
1222 /// Allowed in any state (including after shutdown)
1223 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1224 self.counterparty_htlc_minimum_msat
1227 /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1228 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1229 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1232 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1233 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1234 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1236 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1237 party_max_htlc_value_in_flight_msat
1242 pub fn get_value_satoshis(&self) -> u64 {
1243 self.channel_value_satoshis
1246 pub fn get_fee_proportional_millionths(&self) -> u32 {
1247 self.config.options.forwarding_fee_proportional_millionths
1250 pub fn get_cltv_expiry_delta(&self) -> u16 {
1251 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1254 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1255 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1256 where F::Target: FeeEstimator
1258 match self.config.options.max_dust_htlc_exposure {
1259 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1260 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1261 ConfirmationTarget::OnChainSweep) as u64;
1262 feerate_per_kw.saturating_mul(multiplier)
1264 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1268 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1269 pub fn prev_config(&self) -> Option<ChannelConfig> {
1270 self.prev_config.map(|prev_config| prev_config.0)
1273 // Checks whether we should emit a `ChannelPending` event.
1274 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1275 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1278 // Returns whether we already emitted a `ChannelPending` event.
1279 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1280 self.channel_pending_event_emitted
1283 // Remembers that we already emitted a `ChannelPending` event.
1284 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1285 self.channel_pending_event_emitted = true;
1288 // Checks whether we should emit a `ChannelReady` event.
1289 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1290 self.is_usable() && !self.channel_ready_event_emitted
1293 // Remembers that we already emitted a `ChannelReady` event.
1294 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1295 self.channel_ready_event_emitted = true;
1298 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1299 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1300 /// no longer be considered when forwarding HTLCs.
1301 pub fn maybe_expire_prev_config(&mut self) {
1302 if self.prev_config.is_none() {
1305 let prev_config = self.prev_config.as_mut().unwrap();
1307 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1308 self.prev_config = None;
1312 /// Returns the current [`ChannelConfig`] applied to the channel.
1313 pub fn config(&self) -> ChannelConfig {
1317 /// Updates the channel's config. A bool is returned indicating whether the config update
1318 /// applied resulted in a new ChannelUpdate message.
1319 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1320 let did_channel_update =
1321 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1322 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1323 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1324 if did_channel_update {
1325 self.prev_config = Some((self.config.options, 0));
1326 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1327 // policy change to propagate throughout the network.
1328 self.update_time_counter += 1;
1330 self.config.options = *config;
1334 /// Returns true if funding_signed was sent/received and the
1335 /// funding transaction has been broadcast if necessary.
1336 pub fn is_funding_broadcast(&self) -> bool {
1337 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1338 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1341 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1342 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1343 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1344 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1345 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
/// an HTLC to it).
1347 /// @local is used only to convert relevant internal structures which refer to remote vs local
1348 /// to decide value of outputs and direction of HTLCs.
1349 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1350 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1351 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1352 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1353 /// which peer generated this transaction and "to whom" this transaction flows.
1355 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1356 where L::Target: Logger
1358 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1359 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1360 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1362 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1363 let mut remote_htlc_total_msat = 0;
1364 let mut local_htlc_total_msat = 0;
1365 let mut value_to_self_msat_offset = 0;
1367 let mut feerate_per_kw = self.feerate_per_kw;
1368 if let Some((feerate, update_state)) = self.pending_update_fee {
1369 if match update_state {
1370 // Note that these match the inclusion criteria when scanning
1371 // pending_inbound_htlcs below.
1372 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1373 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1374 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1376 feerate_per_kw = feerate;
1380 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1381 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1382 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1384 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1386 macro_rules! get_htlc_in_commitment {
1387 ($htlc: expr, $offered: expr) => {
1388 HTLCOutputInCommitment {
1390 amount_msat: $htlc.amount_msat,
1391 cltv_expiry: $htlc.cltv_expiry,
1392 payment_hash: $htlc.payment_hash,
1393 transaction_output_index: None
1398 macro_rules! add_htlc_output {
1399 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1400 if $outbound == local { // "offered HTLC output"
1401 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1402 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1405 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1407 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1408 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1409 included_non_dust_htlcs.push((htlc_in_tx, $source));
1411 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1412 included_dust_htlcs.push((htlc_in_tx, $source));
1415 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1416 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1419 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1421 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1422 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1423 included_non_dust_htlcs.push((htlc_in_tx, $source));
1425 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1426 included_dust_htlcs.push((htlc_in_tx, $source));
1432 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1434 for ref htlc in self.pending_inbound_htlcs.iter() {
1435 let (include, state_name) = match htlc.state {
1436 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1437 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1438 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1439 InboundHTLCState::Committed => (true, "Committed"),
1440 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1444 add_htlc_output!(htlc, false, None, state_name);
1445 remote_htlc_total_msat += htlc.amount_msat;
1447 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1449 &InboundHTLCState::LocalRemoved(ref reason) => {
1450 if generated_by_local {
1451 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1452 inbound_htlc_preimages.push(preimage);
1453 value_to_self_msat_offset += htlc.amount_msat as i64;
1463 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1465 for ref htlc in self.pending_outbound_htlcs.iter() {
1466 let (include, state_name) = match htlc.state {
1467 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1468 OutboundHTLCState::Committed => (true, "Committed"),
1469 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1470 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1471 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1474 let preimage_opt = match htlc.state {
1475 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1476 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1477 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1481 if let Some(preimage) = preimage_opt {
1482 outbound_htlc_preimages.push(preimage);
1486 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1487 local_htlc_total_msat += htlc.amount_msat;
1489 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1491 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1492 value_to_self_msat_offset -= htlc.amount_msat as i64;
1494 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1495 if !generated_by_local {
1496 value_to_self_msat_offset -= htlc.amount_msat as i64;
1504 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1505 assert!(value_to_self_msat >= 0);
1506 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1507 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
// "violate" their reserve value by counting those against it. Thus, we have to convert
1509 // everything to i64 before subtracting as otherwise we can overflow.
1510 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1511 assert!(value_to_remote_msat >= 0);
1513 #[cfg(debug_assertions)]
1515 // Make sure that the to_self/to_remote is always either past the appropriate
1516 // channel_reserve *or* it is making progress towards it.
1517 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1518 self.holder_max_commitment_tx_output.lock().unwrap()
1520 self.counterparty_max_commitment_tx_output.lock().unwrap()
1522 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1523 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1524 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1525 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1528 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1529 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1530 let (value_to_self, value_to_remote) = if self.is_outbound() {
1531 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1533 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1536 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1537 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1538 let (funding_pubkey_a, funding_pubkey_b) = if local {
1539 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1541 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1544 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1545 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1550 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1551 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1556 let num_nondust_htlcs = included_non_dust_htlcs.len();
1558 let channel_parameters =
1559 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1560 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1561 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1568 &mut included_non_dust_htlcs,
1571 let mut htlcs_included = included_non_dust_htlcs;
1572 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1573 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1574 htlcs_included.append(&mut included_dust_htlcs);
1576 // For the stats, trimmed-to-0 the value in msats accordingly
1577 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1578 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1586 local_balance_msat: value_to_self_msat as u64,
1587 remote_balance_msat: value_to_remote_msat as u64,
1588 inbound_htlc_preimages,
1589 outbound_htlc_preimages,
/// Creates a set of keys for build_commitment_transaction to generate a transaction which our
/// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
/// our counterparty!)
/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
/// TODO Some magic rust shit to compile-time check this?
fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
// The per-commitment point for our own (broadcastable) commitment comes from our own signer.
let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
// Note the asymmetry: the revocation basepoint passed here is the *counterparty's*, as they
// are the party able to punish us via the revocation path on a transaction we broadcast.
TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
/// Creates a set of keys for build_commitment_transaction to generate a transaction which we
/// will sign and send to our counterparty.
/// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
fn build_remote_transaction_keys(&self) -> TxCreationKeys {
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
// Mirror image of build_holder_transaction_keys: here the counterparty is the broadcaster,
// so their current per-commitment point and their delayed-payment/HTLC basepoints are used,
// while *our* revocation basepoint provides the punishment path.
let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
let counterparty_pubkeys = self.get_counterparty_pubkeys();
// NOTE(review): the unwrap assumes counterparty_cur_commitment_point is always Some by the
// time this is called — presumably guaranteed by channel establishment; confirm at call sites.
TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
/// Gets the redeemscript for the funding transaction output (ie the funding transaction output
/// pays to get_funding_redeemscript().to_v0_p2wsh()).
/// Panics if called before accept_channel/InboundV1Channel::new
pub fn get_funding_redeemscript(&self) -> ScriptBuf {
// 2-of-2 multisig over both parties' funding pubkeys (see chan_utils::make_funding_redeemscript).
make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
/// Returns a reference to the counterparty's funding pubkey (used in the 2-of-2 funding output).
fn counterparty_funding_pubkey(&self) -> &PublicKey {
&self.get_counterparty_pubkeys().funding_pubkey
/// Returns the channel's current commitment feerate, in satoshis per 1000 weight units.
pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
/// Returns a conservatively-buffered feerate used when evaluating our dust-HTLC exposure,
/// optionally taking a prospective outbound feerate update into account.
pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
// When calculating our exposure to dust HTLCs, we assume that the channel feerate
// may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
// whichever is higher. This ensures that we aren't suddenly exposed to significantly
// more dust balance if the feerate increases when we have several HTLCs pending
// which are near the dust limit.
let mut feerate_per_kw = self.feerate_per_kw;
// If there's a pending update fee, use it to ensure we aren't under-estimating
// potential feerate updates coming soon.
if let Some((feerate, _)) = self.pending_update_fee {
feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// A caller-supplied prospective outbound feerate update also raises the floor.
if let Some(feerate) = outbound_feerate_update {
feerate_per_kw = cmp::max(feerate_per_kw, feerate);
// Result is the larger of 2530 sat/kWU and 125% of the (already-maxed) feerate.
cmp::max(2530, feerate_per_kw * 1250 / 1000)
/// Get forwarding information for the counterparty.
pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
// Cloned so the caller gets an owned copy of the stored value.
self.counterparty_forwarding_info.clone()
/// Returns a HTLCStats about inbound pending htlcs
fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
let mut stats = HTLCStats {
pending_htlcs: context.pending_inbound_htlcs.len() as u32,
pending_htlcs_value_msat: 0,
on_counterparty_tx_dust_exposure_msat: 0,
on_holder_tx_dust_exposure_msat: 0,
holding_cell_msat: 0,
on_holder_tx_holding_cell_htlcs_count: 0,
// Without anchors, an HTLC is effectively dust if its value can't also cover its
// second-stage HTLC transaction at the buffered feerate; with zero-fee-HTLC anchors
// no weight term applies (that branch of this `if` is elided from this view).
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// An inbound HTLC is claimed by the counterparty via a timeout tx (their offered HTLC)
// and by us via a success tx, hence the different weight terms per side.
let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
for ref htlc in context.pending_inbound_htlcs.iter() {
stats.pending_htlcs_value_msat += htlc.amount_msat;
// amount_msat is msat; the dust thresholds are sat.
if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
/// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
let mut stats = HTLCStats {
pending_htlcs: context.pending_outbound_htlcs.len() as u32,
pending_htlcs_value_msat: 0,
on_counterparty_tx_dust_exposure_msat: 0,
on_holder_tx_dust_exposure_msat: 0,
holding_cell_msat: 0,
on_holder_tx_holding_cell_htlcs_count: 0,
// As in get_inbound_pending_htlc_stats: without anchors the second-stage HTLC tx fee is
// folded into the dust threshold (the anchors arm of this `if` is elided from this view).
let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
(dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
// An outbound HTLC is claimed by the counterparty via a success tx (preimage) and by us via
// a timeout tx — the mirror of the inbound case.
let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
for ref htlc in context.pending_outbound_htlcs.iter() {
stats.pending_htlcs_value_msat += htlc.amount_msat;
if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
// HTLC adds sitting in our holding cell count towards the stats too — they will be added to
// a commitment tx as soon as we're able.
for update in context.holding_cell_htlc_updates.iter() {
if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
stats.pending_htlcs += 1;
stats.pending_htlcs_value_msat += amount_msat;
stats.holding_cell_msat += amount_msat;
if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
stats.on_holder_tx_dust_exposure_msat += amount_msat;
stats.on_holder_tx_holding_cell_htlcs_count += 1;
/// Get the available balances, see [`AvailableBalances`]'s fields for more info.
/// Doesn't bother handling the
/// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
/// corner case properly.
pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
-> AvailableBalances
where F::Target: FeeEstimator
let context = &self;
// Note that we have to handle overflow due to the above case.
let inbound_stats = context.get_inbound_pending_htlc_stats(None);
let outbound_stats = context.get_outbound_pending_htlc_stats(None);
// Our balance: current to_self value, plus inbound HTLCs we're already claiming (we have
// the preimage), minus all pending outbound HTLC value.
let mut balance_msat = context.value_to_self_msat;
for ref htlc in context.pending_inbound_htlcs.iter() {
if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
balance_msat += htlc.amount_msat;
balance_msat -= outbound_stats.pending_htlcs_value_msat;
// Spendable outbound capacity: our balance net of pending outbound HTLCs and the reserve the
// counterparty requires of us (saturating subtraction avoids underflow).
let outbound_capacity_msat = context.value_to_self_msat
.saturating_sub(outbound_stats.pending_htlcs_value_msat)
context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
let mut available_capacity_msat = outbound_capacity_msat;
// With anchors, both anchor outputs come out of the funder's balance as well.
let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
if context.is_outbound() {
// We should mind channel commit tx fee when computing how much of the available capacity
// can be used in the next htlc. Mirrors the logic in send_htlc.
// The fee depends on whether the amount we will be sending is above dust or not,
// and the answer will in turn change the amount itself — making it a circular
// This complicates the computation around dust-values, up to the one-htlc-value.
let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
// Commitment fee we'd owe if the next HTLC lands non-dust vs. dust (differs by exactly
// one HTLC output's worth of fee).
let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
// Extra headroom against feerate spikes — non-anchor channels only.
max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
// We will first subtract the fee as if we were above-dust. Then, if the resulting
// value ends up being below dust, we have this fee available again. In that case,
// match the value to right-below-dust.
let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
debug_assert!(one_htlc_difference_msat != 0);
capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
// If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
// sending a new HTLC won't reduce their balance below our reserve threshold.
let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
.saturating_sub(inbound_stats.pending_htlcs_value_msat);
if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
// If another HTLC's fee would reduce the remote's balance below the reserve limit
// we've selected for them, we can only send dust HTLCs.
available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
// If we get close to our maximum dust exposure, we end up in a situation where we can send
// between zero and the remaining dust exposure limit remaining OR above the dust limit.
// Because we cannot express this as a simple min/max, we prefer to tell the user they can
// send above the dust limit (as the router can always overpay to meet the dust limit).
let mut remaining_msat_below_dust_exposure_limit = None;
let mut dust_exposure_dust_limit_msat = 0;
let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
(context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Would one more maximally-sized dust HTLC push us past the configured dust-exposure cap on
// either commitment transaction?
let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
remaining_msat_below_dust_exposure_limit =
Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
remaining_msat_below_dust_exposure_limit = Some(cmp::min(
remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
if available_capacity_msat < dust_exposure_dust_limit_msat {
available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
// Also cap by the counterparty's max_htlc_value_in_flight / max_accepted_htlcs limits.
available_capacity_msat = cmp::min(available_capacity_msat,
context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
available_capacity_msat = 0;
// Inbound capacity: the counterparty's share net of pending inbound HTLCs and the reserve we
// require of them, clamped to zero (the struct construction here is partially elided).
inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
- context.value_to_self_msat as i64
- context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
- context.holder_selected_channel_reserve_satoshis as i64 * 1000,
outbound_capacity_msat,
next_outbound_htlc_limit_msat: available_capacity_msat,
next_outbound_htlc_minimum_msat,
/// Returns the holder-selected channel reserve and, if the counterparty has provided one,
/// the counterparty-selected channel reserve, both in satoshis.
pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
let context = &self;
(context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
/// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
/// number of pending HTLCs that are on track to be in our next commitment tx.
/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
/// `fee_spike_buffer_htlc` is `Some`.
/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
/// Dust HTLCs are excluded.
fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
let context = &self;
// Only the funder pays the commitment fee (cf. the "channel is inbound (i.e. counterparty
// pays the fee)" logic in get_available_balances), so we must be outbound here.
assert!(context.is_outbound());
let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// On our commitment tx, inbound HTLCs are claimed via success tx, outbound via timeout tx.
let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
let mut addl_htlcs = 0;
if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
// Count the candidate HTLC itself if it would be non-dust (arms of a match on htlc.origin;
// the match header is elided from this view).
HTLCInitiator::LocalOffered => {
if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
HTLCInitiator::RemoteOffered => {
if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
let mut included_htlcs = 0;
for ref htlc in context.pending_inbound_htlcs.iter() {
if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
// We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
// transaction including this HTLC if it times out before they RAA.
included_htlcs += 1;
for ref htlc in context.pending_outbound_htlcs.iter() {
if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
OutboundHTLCState::Committed => included_htlcs += 1,
OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
// We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
// transaction won't be generated until they send us their next RAA, which will mean
// dropping any HTLCs in this state.
// Holding-cell adds also count towards the fee if non-dust.
for htlc in context.holding_cell_htlc_updates.iter() {
&HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
if amount_msat / 1000 < real_dust_limit_timeout_sat {
_ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
// ack we're guaranteed to never include them in commitment txs anymore.
let num_htlcs = included_htlcs + addl_htlcs;
let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// In test/fuzz builds, cache the computed fee info so sanity checks elsewhere can compare
// against it (the `fee` binding referenced below is elided from this view).
#[cfg(any(test, fuzzing))]
if fee_spike_buffer_htlc.is_some() {
fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
+ context.holding_cell_htlc_updates.len();
let commitment_tx_info = CommitmentTxInfoCached {
total_pending_htlcs,
next_holder_htlc_id: match htlc.origin {
HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
next_counterparty_htlc_id: match htlc.origin {
HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
feerate: context.feerate_per_kw,
*context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
/// Get the commitment tx fee for the remote's next commitment transaction based on the number of
/// pending HTLCs that are on track to be in their next commitment tx
/// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
/// `fee_spike_buffer_htlc` is `Some`.
/// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
/// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
/// Dust HTLCs are excluded.
fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
let context = &self;
// The counterparty only pays the commitment fee when they funded the channel, i.e. when the
// channel is inbound from our perspective.
assert!(!context.is_outbound());
let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
(context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
// Dust thresholds here use the *counterparty's* dust limit, since this is their tx.
let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
let mut addl_htlcs = 0;
if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
// Count the candidate HTLC itself if non-dust; on their tx, our outbound (LocalOffered)
// HTLCs are success-claimed and our inbound (RemoteOffered) are timeout-claimed — the
// mirror of next_local_commit_tx_fee_msat. (Match header elided from this view.)
HTLCInitiator::LocalOffered => {
if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
HTLCInitiator::RemoteOffered => {
if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
// When calculating the set of HTLCs which will be included in their next commitment_signed, all
// non-dust inbound HTLCs are included (as all states imply it will be included) and only
// committed outbound HTLCs, see below.
let mut included_htlcs = 0;
for ref htlc in context.pending_inbound_htlcs.iter() {
// NOTE(review): `<=` here (vs `<` in next_local_commit_tx_fee_msat) treats an HTLC at
// exactly the dust threshold as dust on the remote tx — confirm this asymmetry is intended.
if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
included_htlcs += 1;
for ref htlc in context.pending_outbound_htlcs.iter() {
if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
// We only include outbound HTLCs if it will not be included in their next commitment_signed,
// i.e. if they've responded to us with an RAA after announcement.
OutboundHTLCState::Committed => included_htlcs += 1,
OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
let num_htlcs = included_htlcs + addl_htlcs;
let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
// Test/fuzz-only cache of the fee computation (the `fee` binding is elided from this view).
#[cfg(any(test, fuzzing))]
if fee_spike_buffer_htlc.is_some() {
fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
let commitment_tx_info = CommitmentTxInfoCached {
total_pending_htlcs,
next_holder_htlc_id: match htlc.origin {
HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
next_counterparty_htlc_id: match htlc.origin {
HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
feerate: context.feerate_per_kw,
*context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
/// Runs `f` only while the funding transaction has not yet been broadcast: checks the
/// FundingCreated / WaitingForBatch bits of channel_state (the success arm returning `f()`
/// is elided from this view).
fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
where F: Fn() -> Option<O> {
if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
/// Returns the transaction if there is a pending funding transaction that is yet to be
/// broadcast.
pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2108 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
/// broadcast.
2110 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
// Reads the txid from the negotiated funding outpoint rather than the raw transaction,
// so it works even when only the outpoint is known.
2111 self.if_unbroadcasted_funding(||
2112 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2116 /// Returns whether the channel is funded in a batch.
2117 pub fn is_batch_funding(&self) -> bool {
// `is_batch_funding` is an Option used as a marker: `Some` iff part of a batch.
2118 self.is_batch_funding.is_some()
2121 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
/// broadcast.
2123 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
// Same as `unbroadcasted_funding_txid`, restricted to batch-funded channels.
2124 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2127 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2128 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2129 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2130 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2131 /// immediately (others we will have to allow to time out).
2132 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2133 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2134 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2135 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2136 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2137 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2139 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2140 // return them to fail the payment.
2141 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2142 let counterparty_node_id = self.get_counterparty_node_id();
2143 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
// Only never-committed AddHTLCs are failed back immediately; other holding-cell
// update kinds (claims/fails of inbound HTLCs) are handled by the elided arms.
2145 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2146 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2151 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2152 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2153 // returning a channel monitor update here would imply a channel monitor update before
2154 // we even registered the channel monitor to begin with, which is invalid.
2155 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2156 // funding transaction, don't return a funding txo (which prevents providing the
2157 // monitor update to the user, even if we return one).
2158 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2159 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
// CLOSED_CHANNEL_UPDATE_ID marks this as the terminal update for this monitor.
2160 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2161 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2162 update_id: self.latest_monitor_update_id,
2163 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
// NOTE(review): the close of this if/else (the `None` cases) is elided in this excerpt.
2167 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
// Mark the channel fully shut down; per the doc above, no further calls are allowed.
2169 self.channel_state = ChannelState::ShutdownComplete as u32;
2170 self.update_time_counter += 1;
// NOTE(review): the `ShutdownResult { ... }` literal opener (and some fields, e.g. the
// monitor update) are elided here; only the fields below are visible.
2173 dropped_outbound_htlcs,
2174 unbroadcasted_batch_funding_txid,
2175 channel_id: self.channel_id,
2176 counterparty_node_id: self.counterparty_node_id,
2180 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
/// Returns `None` (leaving `signer_pending_funding` set) if the signer cannot yet
/// produce the counterparty commitment signature.
2181 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
// Build the counterparty's initial commitment transaction so we can sign it and hand
// them the signature inside the funding_created message.
2182 let counterparty_keys = self.build_remote_transaction_keys();
2183 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2184 let signature = match &self.holder_signer {
2185 // TODO (taproot|arik): move match into calling method for Taproot
2186 ChannelSignerType::Ecdsa(ecdsa) => {
// `.ok()?` turns a not-yet-available (async) signature into an early `None` return.
2187 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2188 .map(|(sig, _)| sig).ok()?
2190 // TODO (taproot|arik)
2195 if self.signer_pending_funding {
2196 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2197 self.signer_pending_funding = false;
2200 Some(msgs::FundingCreated {
2201 temporary_channel_id: self.temporary_channel_id.unwrap(),
2202 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2203 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
// NOTE(review): the message's `signature` field is elided in this excerpt.
2206 partial_signature_with_nonce: None,
2208 next_local_nonce: None,
2212 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
/// Returns the counterparty's initial commitment transaction and, if the signer produced
/// a signature, the funding_signed message to send.
2213 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2214 let counterparty_keys = self.build_remote_transaction_keys();
// `+ 1`: at this point in the handshake the counterparty's *initial* commitment number
// is one past the current counter.
2215 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2217 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2218 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2219 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2220 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2222 match &self.holder_signer {
2223 // TODO (arik): move match into calling method for Taproot
2224 ChannelSignerType::Ecdsa(ecdsa) => {
// An unavailable signature presumably becomes `funding_signed == None` (the error
// mapping lines are elided here) and is handled via signer_pending_funding below.
2225 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2226 .map(|(signature, _)| msgs::FundingSigned {
2227 channel_id: self.channel_id(),
2230 partial_signature_with_nonce: None,
2234 if funding_signed.is_none() {
2235 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2236 self.signer_pending_funding = true;
2237 } else if self.signer_pending_funding {
2238 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2239 self.signer_pending_funding = false;
2242 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2243 (counterparty_initial_commitment_tx, funding_signed)
2245 // TODO (taproot|arik)
2252 // Internal utility functions for channels
2254 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2255 /// `channel_value_satoshis` in msat, set through
2256 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2258 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2260 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2261 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2262 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2264 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2267 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2269 channel_value_satoshis * 10 * configured_percent
2272 /// Returns a minimum channel reserve value the remote needs to maintain,
2273 /// required by us according to the configured or default
2274 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2276 /// Guaranteed to return a value no larger than channel_value_satoshis
2278 /// This is used both for outbound and inbound channels and has lower bound
2279 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2280 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2281 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2282 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
/// This is for legacy reasons, present for forward-compatibility.
/// LDK versions older than 0.0.104 don't know how read/handle values other than default
/// from storage. Hence, we use this function to not persist default values of
/// `holder_selected_channel_reserve_satoshis` for channels into storage.
pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
	// 1% of the channel value, clamped into [1000 sat, channel_value_satoshis].
	// Plain division replaces the previous `overflowing_div(100)`: dividing a u64 by a
	// non-zero constant can never overflow, so the discarded overflow flag was always false.
	let q = channel_value_satoshis / 100;
	cmp::min(channel_value_satoshis, cmp::max(q, 1000))
}
2294 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2295 // Note that num_htlcs should not include dust HTLCs.
2297 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2298 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2301 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2302 // Note that num_htlcs should not include dust HTLCs.
2303 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2304 // Note that we need to divide before multiplying to round properly,
2305 // since the lowest denomination of bitcoin on-chain is the satoshi.
2306 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2309 // Holder designates channel data owned for the benefit of the user client.
2310 // Counterparty designates channel data owned by the another channel participant entity.
// A single payment channel. All persistent state lives in the shared `ChannelContext`;
// the `impl` blocks on `Channel` layer the channel state machine on top of it.
2311 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2312 pub context: ChannelContext<SP>,
2315 #[cfg(any(test, fuzzing))]
// Test/fuzzing-only cache of the inputs that went into a projected commitment-tx fee
// computation (see its construction in the fee-projection code above), used to
// cross-check later fee calculations.
2316 struct CommitmentTxInfoCached {
// Total pending inbound + outbound HTLC count at the time the fee was computed.
2318 total_pending_htlcs: usize,
// Next holder/counterparty HTLC ids, adjusted for the HTLC whose addition was simulated.
2319 next_holder_htlc_id: u64,
2320 next_counterparty_htlc_id: u64,
2324 impl<SP: Deref> Channel<SP> where
2325 SP::Target: SignerProvider,
2326 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
// Checks a feerate proposed by the remote peer against our minimum acceptable feerate
// for this channel type; errors with `ChannelError::Close` when it is far too low.
2328 fn check_remote_fee<F: Deref, L: Deref>(
2329 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2330 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2331 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
// Pick the estimator target matching the channel type (anchor vs non-anchor channels
// have different minimum acceptable remote feerates).
2333 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2334 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2336 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2338 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2339 if feerate_per_kw < lower_limit {
// Even a below-limit feerate is accepted when it improves on the channel's current
// feerate -- refusing would keep us stuck on an even lower one.
2340 if let Some(cur_feerate) = cur_feerate_per_kw {
2341 if feerate_per_kw > cur_feerate {
// NOTE(review): the log macro invocation opening this message is elided in this excerpt.
2343 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2344 cur_feerate, feerate_per_kw);
2348 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
// Returns our shutdown scriptpubkey for the cooperative-close transaction.
2354 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2355 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2356 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2357 // outside of those situations will fail.
// Panics (via unwrap) if `shutdown_scriptpubkey` has not been set yet.
2358 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
// Estimates the weight of the cooperative closing transaction, optionally including each
// side's output: a `None` scriptpubkey means that side's output is omitted entirely.
// NOTE(review): the opening of the accumulator (`let mut ret = ...` with the fixed
// version/input/locktime fields) is elided in this excerpt.
2362 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2367 1 + // script length (0)
2371 )*4 + // * 4 for non-witness parts
2372 2 + // witness marker and flag
2373 1 + // witness element count
2374 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2375 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2376 2*(1 + 71); // two signatures + sighash type flags
2377 if let Some(spk) = a_scriptpubkey {
2378 ret += ((8+1) + // output values and script length
2379 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2381 if let Some(spk) = b_scriptpubkey {
2382 ret += ((8+1) + // output values and script length
2383 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
// Builds the cooperative closing transaction for the proposed total fee, returning the
// transaction and the effective total fee actually paid (which may exceed the proposal
// if the funder's balance cannot cover it).
2389 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
// Cooperative close requires all HTLCs and any pending fee update to be fully resolved.
2390 assert!(self.context.pending_inbound_htlcs.is_empty());
2391 assert!(self.context.pending_outbound_htlcs.is_empty());
2392 assert!(self.context.pending_update_fee.is_none());
2394 let mut total_fee_satoshis = proposed_total_fee_satoshis;
// The funder (the outbound side) pays the closing fee, so it is deducted from
// whichever side opened the channel.
2395 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2396 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
// If the fee exceeds the funder's balance, zero their output and fold the shortfall
// into the effective total fee.
2398 if value_to_holder < 0 {
2399 assert!(self.context.is_outbound());
2400 total_fee_satoshis += (-value_to_holder) as u64;
2401 } else if value_to_counterparty < 0 {
2402 assert!(!self.context.is_outbound());
2403 total_fee_satoshis += (-value_to_counterparty) as u64;
// Dust outputs (or an explicitly skipped remote output) are dropped entirely.
2406 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2407 value_to_counterparty = 0;
2410 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2411 value_to_holder = 0;
2414 assert!(self.context.shutdown_scriptpubkey.is_some());
2415 let holder_shutdown_script = self.get_closing_scriptpubkey();
2416 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2417 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2419 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2420 (closing_transaction, total_fee_satoshis)
// Returns the channel's funding outpoint.
2423 fn funding_outpoint(&self) -> OutPoint {
// Panics if the funding outpoint has not been negotiated/set yet.
2424 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2427 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
/// entirely.
2430 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2431 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2433 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
/// disconnected).
2435 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2436 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2437 where L::Target: Logger {
2438 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2439 // (see equivalent if condition there).
2440 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
// Save and restore latest_monitor_update_id so the monitor update generated inside
// `get_update_fulfill_htlc` is effectively discarded.
2441 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2442 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2443 self.context.latest_monitor_update_id = mon_update_id;
2444 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2445 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
// Attempts to claim inbound HTLC `htlc_id_arg` with `payment_preimage_arg`. Returns the
// monitor update carrying the preimage and, when the channel can currently send updates,
// the update_fulfill_htlc message; otherwise the claim is queued in the holding cell.
2449 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2450 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2451 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2452 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2454 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2455 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2457 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2459 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2460 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2461 // these, but for now we just have to treat them as normal.
// Locate the pending inbound HTLC; usize::MAX serves as the "not found" sentinel.
2463 let mut pending_idx = core::usize::MAX;
2464 let mut htlc_value_msat = 0;
2465 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2466 if htlc.htlc_id == htlc_id_arg {
2467 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2468 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2469 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2471 InboundHTLCState::Committed => {},
2472 InboundHTLCState::LocalRemoved(ref reason) => {
2473 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2475 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2476 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2478 return UpdateFulfillFetch::DuplicateClaim {};
2481 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2482 // Don't return in release mode here so that we can update channel_monitor
2486 htlc_value_msat = htlc.amount_msat;
2490 if pending_idx == core::usize::MAX {
2491 #[cfg(any(test, fuzzing))]
2492 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2493 // this is simply a duplicate claim, not previously failed and we lost funds.
2494 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2495 return UpdateFulfillFetch::DuplicateClaim {};
2498 // Now update local state:
2500 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2501 // can claim it even if the channel hits the chain before we see their next commitment.
2502 self.context.latest_monitor_update_id += 1;
2503 let monitor_update = ChannelMonitorUpdate {
2504 update_id: self.context.latest_monitor_update_id,
2505 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2506 payment_preimage: payment_preimage_arg.clone(),
// If we can't send an update_fulfill_htlc right now (awaiting the peer's RAA, peer
// disconnected, or a monitor update already in flight), queue the claim in the
// holding cell instead of producing a message.
2510 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2511 // Note that this condition is the same as the assertion in
2512 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2513 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2514 // do not not get into this branch.
2515 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2516 match pending_update {
2517 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2518 if htlc_id_arg == htlc_id {
2519 // Make sure we don't leave latest_monitor_update_id incremented here:
2520 self.context.latest_monitor_update_id -= 1;
2521 #[cfg(any(test, fuzzing))]
2522 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2523 return UpdateFulfillFetch::DuplicateClaim {};
2526 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2527 if htlc_id_arg == htlc_id {
2528 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2529 // TODO: We may actually be able to switch to a fulfill here, though its
2530 // rare enough it may not be worth the complexity burden.
2531 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2532 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2538 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2539 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2540 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2542 #[cfg(any(test, fuzzing))]
2543 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2544 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2546 #[cfg(any(test, fuzzing))]
2547 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
// We can send immediately: mark the HTLC LocalRemoved and emit the fulfill message.
2550 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2551 if let InboundHTLCState::Committed = htlc.state {
2553 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2554 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2556 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2557 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2560 UpdateFulfillFetch::NewClaim {
2563 msg: Some(msgs::UpdateFulfillHTLC {
2564 channel_id: self.context.channel_id(),
2565 htlc_id: htlc_id_arg,
2566 payment_preimage: payment_preimage_arg,
// Claims HTLC `htlc_id` as `get_update_fulfill_htlc` does, and additionally folds the
// follow-up commitment update into the returned (or blocked) monitor update.
2571 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2572 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2573 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2574 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2575 // Even if we aren't supposed to let new monitor updates with commitment state
2576 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2577 // matter what. Sadly, to push a new monitor update which flies before others
2578 // already queued, we have to insert it into the pending queue and update the
2579 // update_ids of all the following monitors.
2580 if release_cs_monitor && msg.is_some() {
2581 let mut additional_update = self.build_commitment_no_status_check(logger);
2582 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2583 // to be strictly increasing by one, so decrement it here.
2584 self.context.latest_monitor_update_id = monitor_update.update_id;
2585 monitor_update.updates.append(&mut additional_update.updates);
2587 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2588 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2589 monitor_update.update_id = new_mon_id;
2590 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2591 held_update.update.update_id += 1;
// NOTE(review): the else-branch structure around the following lines is elided in this
// excerpt; they queue the commitment update as a blocked monitor update.
2594 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2595 let update = self.build_commitment_no_status_check(logger);
2596 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2602 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2603 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2605 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2609 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2610 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2611 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2612 /// before we fail backwards.
2614 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2615 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2616 /// [`ChannelError::Ignore`].
2617 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2618 -> Result<(), ChannelError> where L::Target: Logger {
// `true` forces the failure into the holding cell, so no immediate message is ever
// produced here -- which the assert below enforces.
2619 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2620 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2623 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2624 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2625 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2626 /// before we fail backwards.
2628 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2629 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2630 /// [`ChannelError::Ignore`].
2631 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2632 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2633 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2634 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2636 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2638 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2639 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2640 // these, but for now we just have to treat them as normal.
// Locate the pending inbound HTLC being failed; usize::MAX is the "not found" sentinel.
2642 let mut pending_idx = core::usize::MAX;
2643 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2644 if htlc.htlc_id == htlc_id_arg {
2646 InboundHTLCState::Committed => {},
2647 InboundHTLCState::LocalRemoved(ref reason) => {
2648 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2650 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2655 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2656 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2662 if pending_idx == core::usize::MAX {
2663 #[cfg(any(test, fuzzing))]
2664 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2665 // is simply a duplicate fail, not previously failed and we failed-back too early.
2666 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
// If we can't send an update_fail_htlc right now (awaiting the peer's RAA, peer
// disconnected, or a monitor update in flight), fall back to the holding cell.
2670 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2671 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2672 force_holding_cell = true;
2675 // Now update local state:
2676 if force_holding_cell {
// Scan the holding cell for an existing resolution of this HTLC before queueing.
2677 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2678 match pending_update {
2679 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2680 if htlc_id_arg == htlc_id {
2681 #[cfg(any(test, fuzzing))]
2682 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2686 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2687 if htlc_id_arg == htlc_id {
2688 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2689 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2695 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2696 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2697 htlc_id: htlc_id_arg,
2703 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
// Immediate path: mark the HTLC LocalRemoved and emit the update_fail_htlc message.
2705 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2706 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2709 Ok(Some(msgs::UpdateFailHTLC {
2710 channel_id: self.context.channel_id(),
2711 htlc_id: htlc_id_arg,
2716 // Message handlers:
2718 /// Handles a funding_signed message from the remote end.
2719 /// If this call is successful, broadcast the funding transaction (and not before!)
2720 pub fn funding_signed<L: Deref>(
2721 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2722 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2726 if !self.context.is_outbound() {
2727 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2729 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2730 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2732 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2733 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2734 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2735 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2738 let funding_script = self.context.get_funding_redeemscript();
2740 let counterparty_keys = self.context.build_remote_transaction_keys();
2741 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2742 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2743 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2745 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2746 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2748 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2749 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2751 let trusted_tx = initial_commitment_tx.trust();
2752 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2753 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2754 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2755 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2756 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2760 let holder_commitment_tx = HolderCommitmentTransaction::new(
2761 initial_commitment_tx,
2764 &self.context.get_holder_pubkeys().funding_pubkey,
2765 self.context.counterparty_funding_pubkey()
2768 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2769 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2772 let funding_redeemscript = self.context.get_funding_redeemscript();
2773 let funding_txo = self.context.get_funding_txo().unwrap();
2774 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2775 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2776 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2777 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2778 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2779 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2780 shutdown_script, self.context.get_holder_selected_contest_delay(),
2781 &self.context.destination_script, (funding_txo, funding_txo_script),
2782 &self.context.channel_transaction_parameters,
2783 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2785 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2786 let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
2787 channel_monitor.provide_initial_counterparty_commitment_tx(
2788 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2789 self.context.cur_counterparty_commitment_transaction_number,
2790 self.context.counterparty_cur_commitment_point.unwrap(),
2791 counterparty_initial_commitment_tx.feerate_per_kw(),
2792 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2793 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
2795 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have no had any monitor(s) yet to fail update!
2796 if self.context.is_batch_funding() {
2797 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2799 self.context.channel_state = ChannelState::FundingSent as u32;
2801 self.context.cur_holder_commitment_transaction_number -= 1;
2802 self.context.cur_counterparty_commitment_transaction_number -= 1;
2804 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2806 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2807 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2811 /// Updates the state of the channel to indicate that all channels in the batch have received
2812 /// funding_signed and persisted their monitors.
2813 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2814 /// treated as a non-batch channel going forward.
2815 pub fn set_batch_ready(&mut self) {
// Drop the batch marker entirely; `is_batch_funding()` reports false from here on.
2816 self.context.is_batch_funding = None;
// Clear only the WaitingForBatch bit of the state bitfield, leaving all other
// channel-state flags untouched.
2817 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2820 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2821 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2823 pub fn channel_ready<NS: Deref, L: Deref>(
2824 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2825 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2826 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2828 NS::Target: NodeSigner,
// If the peer is flagged disconnected we expected a channel_reestablish first. Rather
// than force-closing, stash the message (known lnd bug 4006 workaround) and ignore it.
2831 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2832 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2833 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
// Record a fresh SCID alias from the peer, if it differs from our known short_channel_id.
2836 if let Some(scid_alias) = msg.short_channel_id_alias {
2837 if Some(scid_alias) != self.context.short_channel_id {
2838 // The scid alias provided can be used to route payments *from* our counterparty,
2839 // i.e. can be used for inbound payments and provided in invoices, but is not used
2840 // when routing outbound payments.
2841 self.context.latest_inbound_scid_alias = Some(scid_alias);
// Mask out the shutdown-related flags so the comparisons below only see the
// funding-lifecycle portion of the state bitfield.
2845 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2847 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2848 // batch, but we can receive channel_ready messages.
2850 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2851 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
// First-time receipt while in FundingSent (ignoring the batch flag): record theirs.
2853 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2854 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
// We'd already sent ours, so both sides are now ready: promote to ChannelReady,
// preserving any shutdown flags that were masked out above.
2855 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2856 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2857 self.context.update_time_counter += 1;
2858 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2859 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2860 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2861 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2863 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2864 // required, or they're sending a fresh SCID alias.
// A retransmitted channel_ready must carry the same per-commitment point as before;
// which stored/derived point we compare against depends on how far the counterparty
// commitment number has advanced.
2865 let expected_point =
2866 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2867 // If they haven't ever sent an updated point, the point they send should match
2869 self.context.counterparty_cur_commitment_point
2870 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2871 // If we've advanced the commitment number once, the second commitment point is
2872 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2873 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2874 self.context.counterparty_prev_commitment_point
2876 // If they have sent updated points, channel_ready is always supposed to match
2877 // their "first" point, which we re-derive here.
2878 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2879 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2880 ).expect("We already advanced, so previous secret keys should have been validated already")))
2882 if expected_point != Some(msg.next_per_commitment_point) {
2883 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
// None of the acceptable states matched — channel_ready is out of protocol order.
2887 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
// Rotate the counterparty commitment points: current becomes previous, the message's
// next_per_commitment_point becomes current.
2890 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2891 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2893 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
// May return announcement_signatures if the channel is now announceable.
2895 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
// Handles an inbound update_add_htlc message: validates it against our channel limits and
// reserve/fee requirements, possibly downgrading `pending_forward_status` to a failure via
// `create_pending_htlc_status` (with a BOLT 4 failure code) for soft limits, or returning
// ChannelError::Close for hard protocol violations. On success the HTLC is recorded in
// `pending_inbound_htlcs` as RemoteAnnounced.
2898 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2899 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2900 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2901 ) -> Result<(), ChannelError>
2902 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2903 FE::Target: FeeEstimator, L::Target: Logger,
2905 // We can't accept HTLCs sent after we've sent a shutdown.
2906 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2907 if local_sent_shutdown {
// Soft-fail with 0x4000|8 (permanent_channel_failure) rather than closing the channel.
2908 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2910 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2911 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2912 if remote_sent_shutdown {
2913 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2915 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2916 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
// Basic sanity limits on the HTLC amount: no more than the channel's value, non-zero,
// and at least our advertised htlc_minimum_msat.
2918 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2919 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2921 if msg.amount_msat == 0 {
2922 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2924 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2925 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
// Enforce our advertised limits on the number and total value of in-flight inbound HTLCs.
2928 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2929 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2930 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2931 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2933 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2934 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2937 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2938 // the reserve_satoshis we told them to always have as direct payment so that they lose
2939 // something if we punish them for broadcasting an old state).
2940 // Note that we don't really care about having a small/no to_remote output in our local
2941 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2942 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2943 // present in the next commitment transaction we send them (at least for fulfilled ones,
2944 // failed ones won't modify value_to_self).
2945 // Note that we will send HTLCs which another instance of rust-lightning would think
2946 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2947 // Channel state once they will not be present in the next received commitment
2949 let mut removed_outbound_total_msat = 0;
2950 for ref htlc in self.context.pending_outbound_htlcs.iter() {
// Only successfully-resolved outbound HTLCs move value away from us; count them so we
// can discount them from the reserve math below.
2951 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2952 removed_outbound_total_msat += htlc.amount_msat;
2953 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2954 removed_outbound_total_msat += htlc.amount_msat;
// Compute the per-HTLC dust thresholds. With anchors the second-stage HTLC txs pay zero
// fee (branch elided in this view); otherwise the threshold scales with the buffered
// feerate and the HTLC tx weight for this channel type.
2958 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2959 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2962 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2963 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2964 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
// Dust-exposure check on the counterparty's commitment transaction.
2966 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
2967 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2968 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2969 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2970 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2971 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
// Soft-fail with 0x1000|7 (temporary_channel_failure) rather than closing.
2972 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// Same dust-exposure check, on our own (holder) commitment transaction.
2976 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2977 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2978 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2979 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2980 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2981 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2982 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
// The counterparty's remaining balance must cover this HTLC outright...
2986 let pending_value_to_self_msat =
2987 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2988 let pending_remote_value_msat =
2989 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2990 if pending_remote_value_msat < msg.amount_msat {
2991 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2994 // Check that the remote can afford to pay for this HTLC on-chain at the current
2995 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
// Only the funder pays the commitment fee; if we're outbound the remote pays nothing.
2997 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2998 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2999 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
// When the remote funds an anchors channel, the two anchor outputs also come out of
// their balance.
3001 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3002 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3006 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3007 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3009 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3010 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
// Recomputed without the !is_outbound() restriction for the checks that follow.
3014 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3015 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3019 if !self.context.is_outbound() {
3020 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3021 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3022 // side, only on the sender's. Note that with anchor outputs we are no longer as
3023 // sensitive to fee spikes, so we need to account for them.
3024 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3025 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3026 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3027 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3029 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3030 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3031 // the HTLC, i.e. its status is already set to failing.
3032 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3033 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3036 // Check that they won't violate our local required channel reserve by adding this HTLC.
3037 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3038 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3039 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3040 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
// HTLC ids must be assigned strictly sequentially by the counterparty.
3043 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3044 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
// cltv_expiry is a block height; values >= 500,000,000 are the unix-timestamp range.
3046 if msg.cltv_expiry >= 500000000 {
3047 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3050 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3051 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
// After LocalShutdownSent the status must have been downgraded above — a Forward
// here indicates a ChannelManager bug, not a peer error.
3052 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3056 // Now update local state:
3057 self.context.next_counterparty_htlc_id += 1;
3058 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3059 htlc_id: msg.htlc_id,
3060 amount_msat: msg.amount_msat,
3061 payment_hash: msg.payment_hash,
3062 cltv_expiry: msg.cltv_expiry,
3063 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3068 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
// For a fulfill, `check_preimage` carries the claimed preimage and `fail_reason` is None;
// for a fail, the reverse. Returns a reference to the matched HTLC so callers can read
// its source/amount, or ChannelError::Close on any protocol violation.
3070 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
// Internal invariant: a preimage and a failure reason are mutually exclusive.
3071 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3072 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3073 if htlc.htlc_id == htlc_id {
3074 let outcome = match check_preimage {
3075 None => fail_reason.into(),
3076 Some(payment_preimage) => {
// Verify the preimage actually hashes to the HTLC's payment_hash before
// accepting the fulfill.
3077 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3078 if payment_hash != htlc.payment_hash {
3079 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3081 OutboundHTLCOutcome::Success(Some(payment_preimage))
// Only an HTLC in the Committed state may be resolved by the peer: not yet
// committed, and already-resolved states, are both spec violations.
3085 OutboundHTLCState::LocalAnnounced(_) =>
3086 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3087 OutboundHTLCState::Committed => {
3088 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3090 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3091 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
// No pending outbound HTLC carried the given id.
3096 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
// Handles an inbound update_fulfill_htlc: after the usual operational-state and
// reconnection checks, delegates preimage verification and state transition to
// `mark_outbound_htlc_removed`, returning the HTLC's source and amount (msat).
3099 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3100 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3101 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3103 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3104 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3107 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
// Handles an inbound update_fail_htlc: same state checks as update_fulfill_htlc, then
// records the failure (no preimage) via `mark_outbound_htlc_removed`.
3110 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3111 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3112 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3114 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3115 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3118 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
// Handles an inbound update_fail_malformed_htlc: identical flow to update_fail_htlc —
// the caller has already converted the malformed-failure data into an HTLCFailReason.
3122 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3123 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3124 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3126 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3127 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3130 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3134 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3135 where L::Target: Logger
3137 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3138 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3140 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3141 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3143 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3144 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3147 let funding_script = self.context.get_funding_redeemscript();
3149 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3151 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3152 let commitment_txid = {
3153 let trusted_tx = commitment_stats.tx.trust();
3154 let bitcoin_tx = trusted_tx.built_transaction();
3155 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3157 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3158 log_bytes!(msg.signature.serialize_compact()[..]),
3159 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3160 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3161 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3162 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3166 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3168 // If our counterparty updated the channel fee in this commitment transaction, check that
3169 // they can actually afford the new fee now.
3170 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3171 update_state == FeeUpdateState::RemoteAnnounced
3174 debug_assert!(!self.context.is_outbound());
3175 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3176 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3177 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3180 #[cfg(any(test, fuzzing))]
3182 if self.context.is_outbound() {
3183 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3184 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3185 if let Some(info) = projected_commit_tx_info {
3186 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3187 + self.context.holding_cell_htlc_updates.len();
3188 if info.total_pending_htlcs == total_pending_htlcs
3189 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3190 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3191 && info.feerate == self.context.feerate_per_kw {
3192 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3198 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3199 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3202 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3203 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3204 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3205 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3206 // backwards compatibility, we never use it in production. To provide test coverage, here,
3207 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3208 #[allow(unused_assignments, unused_mut)]
3209 let mut separate_nondust_htlc_sources = false;
3210 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3211 use core::hash::{BuildHasher, Hasher};
3212 // Get a random value using the only std API to do so - the DefaultHasher
3213 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3214 separate_nondust_htlc_sources = rand_val % 2 == 0;
3217 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3218 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3219 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3220 if let Some(_) = htlc.transaction_output_index {
3221 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3222 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3223 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3225 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3226 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3227 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3228 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3229 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3230 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3231 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3232 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3234 if !separate_nondust_htlc_sources {
3235 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3238 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3240 if separate_nondust_htlc_sources {
3241 if let Some(source) = source_opt.take() {
3242 nondust_htlc_sources.push(source);
3245 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3248 let holder_commitment_tx = HolderCommitmentTransaction::new(
3249 commitment_stats.tx,
3251 msg.htlc_signatures.clone(),
3252 &self.context.get_holder_pubkeys().funding_pubkey,
3253 self.context.counterparty_funding_pubkey()
3256 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3257 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3259 // Update state now that we've passed all the can-fail calls...
3260 let mut need_commitment = false;
3261 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3262 if *update_state == FeeUpdateState::RemoteAnnounced {
3263 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3264 need_commitment = true;
3268 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3269 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3270 Some(forward_info.clone())
3272 if let Some(forward_info) = new_forward {
3273 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3274 &htlc.payment_hash, &self.context.channel_id);
3275 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3276 need_commitment = true;
3279 let mut claimed_htlcs = Vec::new();
3280 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3281 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3282 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3283 &htlc.payment_hash, &self.context.channel_id);
3284 // Grab the preimage, if it exists, instead of cloning
3285 let mut reason = OutboundHTLCOutcome::Success(None);
3286 mem::swap(outcome, &mut reason);
3287 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3288 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3289 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3290 // have a `Success(None)` reason. In this case we could forget some HTLC
3291 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3292 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3294 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3296 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3297 need_commitment = true;
3301 self.context.latest_monitor_update_id += 1;
3302 let mut monitor_update = ChannelMonitorUpdate {
3303 update_id: self.context.latest_monitor_update_id,
3304 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3305 commitment_tx: holder_commitment_tx,
3306 htlc_outputs: htlcs_and_sigs,
3308 nondust_htlc_sources,
3312 self.context.cur_holder_commitment_transaction_number -= 1;
3313 self.context.expecting_peer_commitment_signed = false;
3314 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3315 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3316 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3318 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3319 // In case we initially failed monitor updating without requiring a response, we need
3320 // to make sure the RAA gets sent first.
3321 self.context.monitor_pending_revoke_and_ack = true;
3322 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3323 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3324 // the corresponding HTLC status updates so that
3325 // get_last_commitment_update_for_send includes the right HTLCs.
3326 self.context.monitor_pending_commitment_signed = true;
3327 let mut additional_update = self.build_commitment_no_status_check(logger);
3328 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3329 // strictly increasing by one, so decrement it here.
3330 self.context.latest_monitor_update_id = monitor_update.update_id;
3331 monitor_update.updates.append(&mut additional_update.updates);
3333 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3334 &self.context.channel_id);
3335 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3338 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3339 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3340 // we'll send one right away when we get the revoke_and_ack when we
3341 // free_holding_cell_htlcs().
3342 let mut additional_update = self.build_commitment_no_status_check(logger);
3343 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3344 // strictly increasing by one, so decrement it here.
3345 self.context.latest_monitor_update_id = monitor_update.update_id;
3346 monitor_update.updates.append(&mut additional_update.updates);
3350 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3351 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3352 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3353 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3356 /// Public version of the below, checking relevant preconditions first.
3357 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3358 /// returns `(None, Vec::new())`.
3359 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3360 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3361 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3362 where F::Target: FeeEstimator, L::Target: Logger
// The holding cell may only be freed once the channel is fully open (at or past
// ChannelReady) and we are not awaiting a remote revoke_and_ack, disconnected from our
// peer, or blocked on an in-progress ChannelMonitor update.
3364 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3365 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3366 self.free_holding_cell_htlcs(fee_estimator, logger)
3367 } else { (None, Vec::new()) }
3370 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3371 /// for our counterparty.
// Returns the combined `ChannelMonitorUpdate` (if any updates were freed) plus the HTLCs
// which could not be re-added and must be failed backwards.
3372 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3373 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3374 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3375 where F::Target: FeeEstimator, L::Target: Logger
// Callers must have checked that no monitor update is in flight (see
// maybe_free_holding_cell_htlcs above).
3377 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3378 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3379 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3380 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3382 let mut monitor_update = ChannelMonitorUpdate {
3383 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3384 updates: Vec::new(),
// Take ownership of the queued updates so we can re-apply them one by one below.
3387 let mut htlc_updates = Vec::new();
3388 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3389 let mut update_add_count = 0;
3390 let mut update_fulfill_count = 0;
3391 let mut update_fail_count = 0;
3392 let mut htlcs_to_fail = Vec::new();
3393 for htlc_update in htlc_updates.drain(..) {
3394 // Note that this *can* fail, though it should be due to rather-rare conditions on
3395 // fee races with adding too many outputs which push our total payments just over
3396 // the limit. In case it's less rare than I anticipate, we may want to revisit
3397 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3398 // to rebalance channels.
3399 match &htlc_update {
3400 &HTLCUpdateAwaitingACK::AddHTLC {
3401 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3402 skimmed_fee_msat, blinding_point, ..
3404 match self.send_htlc(
3405 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3406 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3408 Ok(_) => update_add_count += 1,
3411 ChannelError::Ignore(ref msg) => {
3412 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3413 // If we fail to send here, then this HTLC should
3414 // be failed backwards. Failing to send here
3415 // indicates that this HTLC may keep being put back
3416 // into the holding cell without ever being
3417 // successfully forwarded/failed/fulfilled, causing
3418 // our counterparty to eventually close on us.
3419 htlcs_to_fail.push((source.clone(), *payment_hash));
3422 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3428 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3429 // If an HTLC claim was previously added to the holding cell (via
3430 // `get_update_fulfill_htlc`, then generating the claim message itself must
3431 // not fail - any in between attempts to claim the HTLC will have resulted
3432 // in it hitting the holding cell again and we cannot change the state of a
3433 // holding cell HTLC from fulfill to anything else.
3434 let mut additional_monitor_update =
3435 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3436 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3437 { monitor_update } else { unreachable!() };
3438 update_fulfill_count += 1;
3439 monitor_update.updates.append(&mut additional_monitor_update.updates);
3441 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3442 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3443 Ok(update_fail_msg_option) => {
3444 // If an HTLC failure was previously added to the holding cell (via
3445 // `queue_fail_htlc`) then generating the fail message itself must
3446 // not fail - we should never end up in a state where we double-fail
3447 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3448 // for a full revocation before failing.
3449 debug_assert!(update_fail_msg_option.is_some());
3450 update_fail_count += 1;
3453 if let ChannelError::Ignore(_) = e {}
3455 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
// If nothing was actually freed and no fee update is queued, no new commitment is
// needed — just hand back the HTLCs which must be failed backwards.
3462 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3463 return (None, htlcs_to_fail);
// Any queued fee update is applied only after the HTLC updates above.
3465 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3466 self.send_update_fee(feerate, false, fee_estimator, logger)
3471 let mut additional_update = self.build_commitment_no_status_check(logger);
3472 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3473 // but we want them to be strictly increasing by one, so reset it here.
3474 self.context.latest_monitor_update_id = monitor_update.update_id;
3475 monitor_update.updates.append(&mut additional_update.updates);
3477 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3478 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3479 update_add_count, update_fulfill_count, update_fail_count);
3481 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3482 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3488 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3489 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3490 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3491 /// generating an appropriate error *after* the channel state has been updated based on the
3492 /// revoke_and_ack message.
3493 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3494 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3495 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3496 where F::Target: FeeEstimator, L::Target: Logger,
// revoke_and_ack is only valid on an operational, connected channel that has not started
// exchanging closing_signed messages.
3498 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3499 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3501 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3502 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3504 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3505 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
// The revealed per-commitment secret must be a valid secp256k1 secret key, and must
// correspond to the commitment point the peer previously gave us (when we have one).
3508 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3510 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3511 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3512 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3516 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3517 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3518 // haven't given them a new commitment transaction to broadcast). We should probably
3519 // take advantage of this by updating our channel monitor, sending them an error, and
3520 // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3521 // lot of work, and there's some chance this is all a misunderstanding anyway.
3522 // We have to do *something*, though, since our signer may get mad at us for otherwise
3523 // jumping a remote commitment number, so best to just force-close and move on.
3524 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
// (test/fuzzing only) Clear cached next-commitment fee info, which is stale once the
// counterparty's commitment number advances below.
3527 #[cfg(any(test, fuzzing))]
3529 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3530 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3533 match &self.context.holder_signer {
3534 ChannelSignerType::Ecdsa(ecdsa) => {
3535 ecdsa.validate_counterparty_revocation(
3536 self.context.cur_counterparty_commitment_transaction_number + 1,
3538 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3540 // TODO (taproot|arik)
// Archive the revealed secret; provide_secret errors if it is inconsistent with
// previously-received secrets.
3545 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3546 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3547 self.context.latest_monitor_update_id += 1;
3548 let mut monitor_update = ChannelMonitorUpdate {
3549 update_id: self.context.latest_monitor_update_id,
3550 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3551 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3552 secret: msg.per_commitment_secret,
3556 // Update state now that we've passed all the can-fail calls...
3557 // (note that we may still fail to generate the new commitment_signed message, but that's
3558 // OK, we step the channel here and *then* if the new generation fails we can fail the
3559 // channel based on that, but stepping stuff here should be safe either way.
3560 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3561 self.context.sent_message_awaiting_response = None;
3562 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3563 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3564 self.context.cur_counterparty_commitment_transaction_number -= 1;
3566 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3567 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3570 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3571 let mut to_forward_infos = Vec::new();
3572 let mut revoked_htlcs = Vec::new();
3573 let mut finalized_claimed_htlcs = Vec::new();
3574 let mut update_fail_htlcs = Vec::new();
3575 let mut update_fail_malformed_htlcs = Vec::new();
3576 let mut require_commitment = false;
3577 let mut value_to_self_msat_diff: i64 = 0;
3580 // Take references explicitly so that we can hold multiple references to self.context.
3581 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3582 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3583 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3585 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3586 pending_inbound_htlcs.retain(|htlc| {
3587 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3588 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3589 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3590 value_to_self_msat_diff += htlc.amount_msat as i64;
3592 *expecting_peer_commitment_signed = true;
3596 pending_outbound_htlcs.retain(|htlc| {
3597 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3598 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3599 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3600 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3602 finalized_claimed_htlcs.push(htlc.source.clone());
3603 // They fulfilled, so we sent them money
3604 value_to_self_msat_diff -= htlc.amount_msat as i64;
3609 for htlc in pending_inbound_htlcs.iter_mut() {
3610 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3612 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
// mem::swap lets us move the owned state out for matching without cloning it.
3616 let mut state = InboundHTLCState::Committed;
3617 mem::swap(&mut state, &mut htlc.state);
3619 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3620 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3621 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3622 require_commitment = true;
3623 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3624 match forward_info {
3625 PendingHTLCStatus::Fail(fail_msg) => {
3626 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3627 require_commitment = true;
3629 HTLCFailureMsg::Relay(msg) => {
3630 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3631 update_fail_htlcs.push(msg)
3633 HTLCFailureMsg::Malformed(msg) => {
3634 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3635 update_fail_malformed_htlcs.push(msg)
3639 PendingHTLCStatus::Forward(forward_info) => {
3640 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3641 to_forward_infos.push((forward_info, htlc.htlc_id));
3642 htlc.state = InboundHTLCState::Committed;
3648 for htlc in pending_outbound_htlcs.iter_mut() {
3649 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3650 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3651 htlc.state = OutboundHTLCState::Committed;
3652 *expecting_peer_commitment_signed = true;
3654 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3655 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3656 // Grab the preimage, if it exists, instead of cloning
3657 let mut reason = OutboundHTLCOutcome::Success(None);
3658 mem::swap(outcome, &mut reason);
3659 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3660 require_commitment = true;
3664 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3666 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3667 match update_state {
3668 FeeUpdateState::Outbound => {
3669 debug_assert!(self.context.is_outbound());
3670 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3671 self.context.feerate_per_kw = feerate;
3672 self.context.pending_update_fee = None;
3673 self.context.expecting_peer_commitment_signed = true;
3675 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3676 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3677 debug_assert!(!self.context.is_outbound());
3678 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3679 require_commitment = true;
3680 self.context.feerate_per_kw = feerate;
3681 self.context.pending_update_fee = None;
// Depending on `hold_mon_update` and any already-blocked monitor updates, the macro below
// either hands `monitor_update` back to the caller or queues it as blocked.
3686 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3687 let release_state_str =
3688 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3689 macro_rules! return_with_htlcs_to_fail {
3690 ($htlcs_to_fail: expr) => {
3691 if !release_monitor {
3692 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3693 update: monitor_update,
3695 return Ok(($htlcs_to_fail, None));
3697 return Ok(($htlcs_to_fail, Some(monitor_update)));
3702 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3703 // We can't actually generate a new commitment transaction (incl by freeing holding
3704 // cells) while we can't update the monitor, so we just return what we have.
3705 if require_commitment {
3706 self.context.monitor_pending_commitment_signed = true;
3707 // When the monitor updating is restored we'll call
3708 // get_last_commitment_update_for_send(), which does not update state, but we're
3709 // definitely now awaiting a remote revoke before we can step forward any more, so
3711 let mut additional_update = self.build_commitment_no_status_check(logger);
3712 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3713 // strictly increasing by one, so decrement it here.
3714 self.context.latest_monitor_update_id = monitor_update.update_id;
3715 monitor_update.updates.append(&mut additional_update.updates);
3717 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3718 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3719 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3720 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3721 return_with_htlcs_to_fail!(Vec::new());
3724 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3725 (Some(mut additional_update), htlcs_to_fail) => {
3726 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3727 // strictly increasing by one, so decrement it here.
3728 self.context.latest_monitor_update_id = monitor_update.update_id;
3729 monitor_update.updates.append(&mut additional_update.updates);
3731 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3732 &self.context.channel_id(), release_state_str);
3734 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3735 return_with_htlcs_to_fail!(htlcs_to_fail);
3737 (None, htlcs_to_fail) => {
3738 if require_commitment {
3739 let mut additional_update = self.build_commitment_no_status_check(logger);
3741 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3742 // strictly increasing by one, so decrement it here.
3743 self.context.latest_monitor_update_id = monitor_update.update_id;
3744 monitor_update.updates.append(&mut additional_update.updates);
3746 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3747 &self.context.channel_id(),
3748 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3751 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3752 return_with_htlcs_to_fail!(htlcs_to_fail);
3754 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3755 &self.context.channel_id(), release_state_str);
3757 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3758 return_with_htlcs_to_fail!(htlcs_to_fail);
3764 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3765 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3766 /// commitment update.
3767 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3768 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3769 where F::Target: FeeEstimator, L::Target: Logger
// We pass `force_holding_cell = true`, so send_update_fee must queue the update rather
// than return a message to send immediately.
3771 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3772 assert!(msg_opt.is_none(), "We forced holding cell?");
3775 /// Adds a pending update to this channel. See the doc for send_htlc for
3776 /// further details on the optionness of the return value.
3777 /// If our balance is too low to cover the cost of the next commitment transaction at the
3778 /// new feerate, the update is cancelled.
3780 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3781 /// [`Channel`] if `force_holding_cell` is false.
// Only valid on outbound, usable, live channels — the three panics below enforce this.
3782 fn send_update_fee<F: Deref, L: Deref>(
3783 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3784 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3785 ) -> Option<msgs::UpdateFee>
3786 where F::Target: FeeEstimator, L::Target: Logger
3788 if !self.context.is_outbound() {
3789 panic!("Cannot send fee from inbound channel");
3791 if !self.context.is_usable() {
3792 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3794 if !self.context.is_live() {
3795 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3798 // Before proposing a feerate update, check that we can actually afford the new fee.
3799 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3800 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3801 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3802 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3803 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3804 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
// Our balance after paying the buffered commitment fee must still meet the
// counterparty-selected channel reserve.
3805 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3806 //TODO: auto-close after a number of failures?
3807 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3811 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3812 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3813 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3814 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3815 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3816 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3819 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3820 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
// If we're awaiting a remote revoke or blocked on a monitor update we cannot send an
// update_fee right now, so fall back to the holding cell.
3824 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3825 force_holding_cell = true;
3828 if force_holding_cell {
3829 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3833 debug_assert!(self.context.pending_update_fee.is_none());
3834 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3836 Some(msgs::UpdateFee {
3837 channel_id: self.context.channel_id,
3842 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3843 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3845 /// No further message handling calls may be made until a channel_reestablish dance has
3847 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3848 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3849 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3850 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
// Redundant disconnect events can fire; if we've already marked ourselves disconnected,
// return early rather than re-running the (idempotent) cleanup below.
3854 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3855 // While the below code should be idempotent, it's simpler to just return early, as
3856 // redundant disconnect events can fire, though they should be rare.
3860 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3861 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3864 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3865 // will be retransmitted.
3866 self.context.last_sent_closing_fee = None;
3867 self.context.pending_counterparty_closing_signed = None;
3868 self.context.closing_fee_limits = None;
3870 let mut inbound_drop_count = 0;
3871 self.context.pending_inbound_htlcs.retain(|htlc| {
3873 InboundHTLCState::RemoteAnnounced(_) => {
3874 // They sent us an update_add_htlc but we never got the commitment_signed.
3875 // We'll tell them what commitment_signed we're expecting next and they'll drop
3876 // this HTLC accordingly
3877 inbound_drop_count += 1;
3880 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3881 // We received a commitment_signed updating this HTLC and (at least hopefully)
3882 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3883 // in response to it yet, so don't touch it.
3886 InboundHTLCState::Committed => true,
3887 InboundHTLCState::LocalRemoved(_) => {
3888 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3889 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3890 // (that we missed). Keep this around for now and if they tell us they missed
3891 // the commitment_signed we can re-transmit the update then.
// The dropped HTLCs consumed counterparty HTLC ids, so rewind our expected next id.
3896 self.context.next_counterparty_htlc_id -= inbound_drop_count;
// Likewise, a fee update the peer announced but never committed is simply forgotten.
3898 if let Some((_, update_state)) = self.context.pending_update_fee {
3899 if update_state == FeeUpdateState::RemoteAnnounced {
3900 debug_assert!(!self.context.is_outbound());
3901 self.context.pending_update_fee = None;
3905 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3906 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3907 // They sent us an update to remove this but haven't yet sent the corresponding
3908 // commitment_signed, we need to move it back to Committed and they can re-send
3909 // the update upon reconnection.
3910 htlc.state = OutboundHTLCState::Committed;
3914 self.context.sent_message_awaiting_response = None;
3916 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3917 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3921 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3922 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3923 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3924 /// update completes (potentially immediately).
3925 /// The messages which were generated with the monitor update must *not* have been sent to the
3926 /// remote end, and must instead have been dropped. They will be regenerated when
3927 /// [`Self::monitor_updating_restored`] is called.
3929 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3930 /// [`chain::Watch`]: crate::chain::Watch
3931 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3932 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3933 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3934 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3935 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
// Accumulate rather than overwrite: `|=` and `append` preserve anything queued by an
// earlier pause that has not yet been resolved.
3937 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3938 self.context.monitor_pending_commitment_signed |= resend_commitment;
3939 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3940 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3941 self.context.monitor_pending_failures.append(&mut pending_fails);
3942 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3943 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
3946 /// Indicates that the latest ChannelMonitor update has been committed by the client
3947 /// successfully and we should restore normal operation. Returns messages which should be sent
3948 /// to the remote side.
3949 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3950 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3951 user_config: &UserConfig, best_block_height: u32
3952 ) -> MonitorRestoreUpdates
3955 NS::Target: NodeSigner
// This must only be called while a monitor update is actually pending; clear the
// in-progress flag now that the persist has completed.
3957 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3958 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3960 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3961 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3962 // first received the funding_signed.
3963 let mut funding_broadcastable =
3964 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3965 self.context.funding_transaction.take()
3967 // That said, if the funding transaction is already confirmed (ie we're active with a
3968 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3969 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3970 funding_broadcastable = None;
3973 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3974 // (and we assume the user never directly broadcasts the funding transaction and waits for
3975 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3976 // * an inbound channel that failed to persist the monitor on funding_created and we got
3977 // the funding transaction confirmed before the monitor was persisted, or
3978 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3979 let channel_ready = if self.context.monitor_pending_channel_ready {
3980 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3981 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3982 self.context.monitor_pending_channel_ready = false;
3983 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3984 Some(msgs::ChannelReady {
3985 channel_id: self.context.channel_id(),
3986 next_per_commitment_point,
3987 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3991 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
// Drain the deferred HTLC work accumulated by monitor_updating_paused into the return value.
3993 let mut accepted_htlcs = Vec::new();
3994 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3995 let mut failed_htlcs = Vec::new();
3996 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3997 let mut finalized_claimed_htlcs = Vec::new();
3998 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
// If the peer disconnected while the update was in flight we must not resend any
// revoke_and_ack/commitment_signed here — channel_reestablish handling will decide what
// (if anything) needs resending once the peer reconnects.
4000 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
4001 self.context.monitor_pending_revoke_and_ack = false;
4002 self.context.monitor_pending_commitment_signed = false;
4003 return MonitorRestoreUpdates {
4004 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4005 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
// Regenerate any messages that were dropped when the update was paused.
4009 let raa = if self.context.monitor_pending_revoke_and_ack {
4010 Some(self.get_last_revoke_and_ack())
4012 let commitment_update = if self.context.monitor_pending_commitment_signed {
4013 self.get_last_commitment_update_for_send(logger).ok()
// Resending a commitment_signed expects a reply from the peer, so arm the
// awaiting-response disconnect timer.
4015 if commitment_update.is_some() {
4016 self.mark_awaiting_response();
4019 self.context.monitor_pending_revoke_and_ack = false;
4020 self.context.monitor_pending_commitment_signed = false;
4021 let order = self.context.resend_order.clone();
4022 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4023 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4024 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4025 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4026 MonitorRestoreUpdates {
4027 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
/// Handles an inbound `update_fee` message, staging the new feerate as a remote-announced
/// pending fee update and rejecting it if it would push us over our dust-exposure limits.
4031 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4032 where F::Target: FeeEstimator, L::Target: Logger
// Only the channel funder may send update_fee: if we are the outbound (funding) side,
// the peer is the non-funder and must never send this message.
4034 if self.context.is_outbound() {
4035 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4037 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4038 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4040 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
// Stage the fee update; it only takes full effect through the commitment dance.
4042 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4043 self.context.update_time_counter += 1;
4044 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4045 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4046 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4047 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4048 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4049 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4050 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4051 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4052 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4053 msg.feerate_per_kw, holder_tx_dust_exposure)));
4055 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4056 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4057 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4063 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4066 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
// Retry each message the signer previously declined to produce; each branch is gated on
// its corresponding signer_pending_* flag so we only regenerate what was actually blocked.
4067 let commitment_update = if self.context.signer_pending_commitment_update {
4068 self.get_last_commitment_update_for_send(logger).ok()
// funding_signed is only ever sent by the inbound (non-funding) side.
4070 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4071 self.context.get_funding_signed_msg(logger).1
// If we just produced funding_signed, we may also be able to send channel_ready now.
4073 let channel_ready = if funding_signed.is_some() {
4074 self.check_get_channel_ready(0)
// funding_created is only ever sent by the outbound (funding) side.
4076 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4077 self.context.get_funding_created_msg(logger)
4080 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4081 if commitment_update.is_some() { "a" } else { "no" },
4082 if funding_signed.is_some() { "a" } else { "no" },
4083 if funding_created.is_some() { "a" } else { "no" },
4084 if channel_ready.is_some() { "a" } else { "no" });
4086 SignerResumeUpdates {
/// Re-derives (from the signer) the last `revoke_and_ack` we owe our peer, e.g. for resending
/// after a reconnect or once a paused monitor update completes.
4094 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER, so the point for the
// current number is what the peer needs next, while the secret two numbers "ahead"
// (numerically + 2) is the one being revoked.
4095 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4096 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4097 msgs::RevokeAndACK {
4098 channel_id: self.context.channel_id,
4099 per_commitment_secret,
4100 next_per_commitment_point,
// NOTE(review): next_local_nonce appears to be used only for taproot channels — left
// unset here; confirm against the msgs::RevokeAndACK definition.
4102 next_local_nonce: None,
4106 /// Gets the last commitment update for immediate sending to our peer.
4107 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4108 let mut update_add_htlcs = Vec::new();
4109 let mut update_fulfill_htlcs = Vec::new();
4110 let mut update_fail_htlcs = Vec::new();
4111 let mut update_fail_malformed_htlcs = Vec::new();
// Regenerate update_add_htlc for every outbound HTLC we announced but the peer has not
// yet committed to (LocalAnnounced).
4113 for htlc in self.context.pending_outbound_htlcs.iter() {
4114 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4115 update_add_htlcs.push(msgs::UpdateAddHTLC {
4116 channel_id: self.context.channel_id(),
4117 htlc_id: htlc.htlc_id,
4118 amount_msat: htlc.amount_msat,
4119 payment_hash: htlc.payment_hash,
4120 cltv_expiry: htlc.cltv_expiry,
4121 onion_routing_packet: (**onion_packet).clone(),
4122 skimmed_fee_msat: htlc.skimmed_fee_msat,
4123 blinding_point: htlc.blinding_point,
// Regenerate the matching removal message for every inbound HTLC we have locally
// removed (failed, failed-malformed, or fulfilled) but not yet committed.
4128 for htlc in self.context.pending_inbound_htlcs.iter() {
4129 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4131 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4132 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4133 channel_id: self.context.channel_id(),
4134 htlc_id: htlc.htlc_id,
4135 reason: err_packet.clone()
4138 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4139 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4140 channel_id: self.context.channel_id(),
4141 htlc_id: htlc.htlc_id,
4142 sha256_of_onion: sha256_of_onion.clone(),
4143 failure_code: failure_code.clone(),
4146 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4147 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4148 channel_id: self.context.channel_id(),
4149 htlc_id: htlc.htlc_id,
4150 payment_preimage: payment_preimage.clone(),
// Only the funder (outbound side) re-sends a pending update_fee.
4157 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4158 Some(msgs::UpdateFee {
4159 channel_id: self.context.channel_id(),
4160 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4164 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4165 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4166 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
// Ask the signer for the commitment signature; track whether we are still blocked on it
// via signer_pending_commitment_update so signer_maybe_unblocked can retry later.
4167 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4168 if self.context.signer_pending_commitment_update {
4169 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4170 self.context.signer_pending_commitment_update = false;
4174 if !self.context.signer_pending_commitment_update {
4175 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4176 self.context.signer_pending_commitment_update = true;
4180 Ok(msgs::CommitmentUpdate {
4181 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4186 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4187 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
// We only need to (re-)send shutdown if we previously sent one (LocalShutdownSent);
// by then our shutdown script must already have been committed to.
4188 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4189 assert!(self.context.shutdown_scriptpubkey.is_some());
4190 Some(msgs::Shutdown {
4191 channel_id: self.context.channel_id,
4192 scriptpubkey: self.get_closing_scriptpubkey(),
4197 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4198 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4200 /// Some links printed in log lines are included here to check them during build (when run with
4201 /// `cargo doc --document-private-items`):
4202 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4203 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4204 pub fn channel_reestablish<L: Deref, NS: Deref>(
4205 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4206 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4207 ) -> Result<ReestablishResponses, ChannelError>
4210 NS::Target: NodeSigner
4212 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4213 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4214 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4215 // just close here instead of trying to recover.
4216 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
// Sanity-bound the peer's claimed commitment numbers before doing any state changes.
4219 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4220 msg.next_local_commitment_number == 0 {
4221 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER; convert ours into the
// "how many commitments" form the reestablish message uses.
4224 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
// Verify the peer really received one of our revocations: the secret they echo back must
// derive the per-commitment point we handed them at that height.
4225 if msg.next_remote_commitment_number > 0 {
4226 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4227 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4228 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4229 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4230 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
// The peer has proof of a commitment state beyond what we know about: we have lost state
// and broadcasting our stale commitment would forfeit our funds. Panic loudly.
4232 if msg.next_remote_commitment_number > our_commitment_transaction {
4233 macro_rules! log_and_panic {
4234 ($err_msg: expr) => {
4235 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4236 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4239 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4240 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4241 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4242 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4243 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4244 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4245 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4246 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4250 // Before we change the state of the channel, we check if the peer is sending a very old
4251 // commitment transaction number, if yes we send a warning message.
4252 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4253 return Err(ChannelError::Warn(format!(
4254 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4255 msg.next_remote_commitment_number,
4256 our_commitment_transaction
4260 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4261 // remaining cases either succeed or ErrorMessage-fail).
4262 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4263 self.context.sent_message_awaiting_response = None;
4265 let shutdown_msg = self.get_outbound_shutdown();
4267 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
// Pre-channel_ready reconnection: the only thing we might need to (re-)send is
// channel_ready itself.
4269 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4270 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4271 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4272 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4273 if msg.next_remote_commitment_number != 0 {
4274 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4276 // Short circuit the whole handler as there is nothing we can resend them
4277 return Ok(ReestablishResponses {
4278 channel_ready: None,
4279 raa: None, commitment_update: None,
4280 order: RAACommitmentOrder::CommitmentFirst,
4281 shutdown_msg, announcement_sigs,
4285 // We have OurChannelReady set!
4286 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4287 return Ok(ReestablishResponses {
4288 channel_ready: Some(msgs::ChannelReady {
4289 channel_id: self.context.channel_id(),
4290 next_per_commitment_point,
4291 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4293 raa: None, commitment_update: None,
4294 order: RAACommitmentOrder::CommitmentFirst,
4295 shutdown_msg, announcement_sigs,
// Decide whether the peer missed our last revoke_and_ack and we need to resend it.
4299 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4300 // Remote isn't waiting on any RevokeAndACK from us!
4301 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4303 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
// While a monitor update is pending we defer the resend rather than send now.
4304 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4305 self.context.monitor_pending_revoke_and_ack = true;
4308 Some(self.get_last_revoke_and_ack())
4311 debug_assert!(false, "All values should have been handled in the four cases above");
4312 return Err(ChannelError::Close(format!(
4313 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4314 msg.next_remote_commitment_number,
4315 our_commitment_transaction
4319 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4320 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4321 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4322 // the corresponding revoke_and_ack back yet.
4323 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4324 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4325 self.mark_awaiting_response();
4327 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
// Repeat channel_ready if both sides indicate we are exactly at the first commitment.
4329 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4330 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4331 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4332 Some(msgs::ChannelReady {
4333 channel_id: self.context.channel_id(),
4334 next_per_commitment_point,
4335 short_channel_id_alias: Some(self.context.outbound_scid_alias),
// Finally, decide whether the peer also missed our last commitment_signed.
4339 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4340 if required_revoke.is_some() {
4341 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4343 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4346 Ok(ReestablishResponses {
4347 channel_ready, shutdown_msg, announcement_sigs,
4348 raa: required_revoke,
4349 commitment_update: None,
4350 order: self.context.resend_order.clone(),
4352 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4353 if required_revoke.is_some() {
4354 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4356 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
// Again, defer rather than resend while a monitor update is in flight.
4359 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4360 self.context.monitor_pending_commitment_signed = true;
4361 Ok(ReestablishResponses {
4362 channel_ready, shutdown_msg, announcement_sigs,
4363 commitment_update: None, raa: None,
4364 order: self.context.resend_order.clone(),
4367 Ok(ReestablishResponses {
4368 channel_ready, shutdown_msg, announcement_sigs,
4369 raa: required_revoke,
4370 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4371 order: self.context.resend_order.clone(),
4374 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4375 Err(ChannelError::Close(format!(
4376 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4377 msg.next_local_commitment_number,
4378 next_counterparty_commitment_number,
4381 Err(ChannelError::Close(format!(
4382 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4383 msg.next_local_commitment_number,
4384 next_counterparty_commitment_number,
4389 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4390 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4391 /// at which point they will be recalculated.
4392 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4394 where F::Target: FeeEstimator
// The limits are cached so they stay stable for the whole negotiation.
4396 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4398 // Propose a range from our current Background feerate to our Normal feerate plus our
4399 // force_close_avoidance_max_fee_satoshis.
4400 // If we fail to come to consensus, we'll have to force-close.
4401 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4402 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4403 // that we don't expect to need fee bumping
4404 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
// As the non-funder we don't pay the closing fee, so don't bound the feerate ourselves.
4405 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4407 // The spec requires that (when the channel does not have anchors) we only send absolute
4408 // channel fees no greater than the absolute channel fee on the current commitment
4409 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4410 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4411 // some force-closure by old nodes, but we wanted to close the channel anyway.
4413 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4414 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4415 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4416 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4419 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4420 // below our dust limit, causing the output to disappear. We don't bother handling this
4421 // case, however, as this should only happen if a channel is closed before any (material)
4422 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4423 // come to consensus with our counterparty on appropriate fees, however it should be a
4424 // relatively rare case. We can revisit this later, though note that in order to determine
4425 // if the funders' output is dust we have to know the absolute fee we're going to use.
4426 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4427 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4428 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4429 // We always add force_close_avoidance_max_fee_satoshis to our normal
4430 // feerate-calculated fee, but allow the max to be overridden if we're using a
4431 // target feerate-calculated fee.
4432 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4433 proposed_max_feerate as u64 * tx_weight / 1000)
// As the non-funder, cap the fee at the channel value minus our own balance (rounded up
// to a whole sat) — i.e. the funder can spend at most their own funds on fees.
4435 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4438 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4439 self.context.closing_fee_limits.clone().unwrap()
4442 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4443 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4444 /// this point if we're the funder we should send the initial closing_signed, and in any case
4445 /// shutdown should complete within a reasonable timeframe.
4446 fn closing_negotiation_ready(&self) -> bool {
// Thin delegation to the shared context; kept as a method for call-site readability.
4447 self.context.closing_negotiation_ready()
4450 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4451 /// an Err if no progress is being made and the channel should be force-closed instead.
4452 /// Should be called on a one-minute timer.
4453 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4454 if self.closing_negotiation_ready() {
// Two-tick timeout: the first ready tick sets the in-flight flag, the second tick with
// the flag still set means negotiation stalled and we force-close.
4455 if self.context.closing_signed_in_flight {
4456 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4458 self.context.closing_signed_in_flight = true;
/// If appropriate, proposes the initial `closing_signed` (funder) or replies to a queued
/// counterparty `closing_signed` (non-funder), returning any message/tx/shutdown result.
4464 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4465 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4466 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4467 where F::Target: FeeEstimator, L::Target: Logger
4469 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4470 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4471 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4472 // that closing_negotiation_ready checks this case (as well as a few others).
4473 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4474 return Ok((None, None, None));
// As the non-funder we never start negotiation; instead, process any closing_signed the
// counterparty sent before we were ready (queued in pending_counterparty_closing_signed).
4477 if !self.context.is_outbound() {
4478 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4479 return self.closing_signed(fee_estimator, &msg);
4481 return Ok((None, None, None));
4484 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4485 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4486 if self.context.expecting_peer_commitment_signed {
4487 return Ok((None, None, None));
4490 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4492 assert!(self.context.shutdown_scriptpubkey.is_some());
// Open negotiation at our minimum fee; the fee_range below tells the peer how far up we
// are willing to go.
4493 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4494 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4495 our_min_fee, our_max_fee, total_fee_satoshis);
4497 match &self.context.holder_signer {
4498 ChannelSignerType::Ecdsa(ecdsa) => {
4500 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4501 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
// Remember what we proposed so closing_signed handling can judge the peer's reply.
4503 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4504 Ok((Some(msgs::ClosingSigned {
4505 channel_id: self.context.channel_id,
4506 fee_satoshis: total_fee_satoshis,
4508 fee_range: Some(msgs::ClosingSignedFeeRange {
4509 min_fee_satoshis: our_min_fee,
4510 max_fee_satoshis: our_max_fee,
4514 // TODO (taproot|arik)
4520 // Marks a channel as waiting for a response from the counterparty. If it's not received
4521 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4523 fn mark_awaiting_response(&mut self) {
// Some(0) starts the tick counter; should_disconnect_peer_awaiting_response advances it.
4524 self.context.sent_message_awaiting_response = Some(0);
4527 /// Determines whether we should disconnect the counterparty due to not receiving a response
4528 /// within our expected timeframe.
4530 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4531 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4532 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4535 // Don't disconnect when we're not waiting on a response.
// One tick per call; disconnect once the configured tick budget has been exhausted.
4538 *ticks_elapsed += 1;
4539 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
// NOTE(review): the `pub fn shutdown<...>` signature line and several interior lines are
// not visible in this chunk. This is the handler for an inbound `shutdown` message: it
// validates channel state and the peer's scriptpubkey, possibly generates our own
// `Shutdown` reply and a `ChannelMonitorUpdate` recording our shutdown script, and
// drops any holding-cell HTLC additions so the payments can be failed back.
4543 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4544 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
// Reject shutdown while disconnected: a channel_reestablish must come first.
4546 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4547 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4549 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4550 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4551 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4552 // can do that via error message without getting a connection fail anyway...
4553 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
// Per the spec we may not shut down while the peer has un-committed inbound HTLC adds.
4555 for htlc in self.context.pending_inbound_htlcs.iter() {
4556 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4557 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4560 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
// Warn (not close) on a nonstandard closing script, per BOLT 2 script requirements.
4562 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4563 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
// If the peer previously committed to a shutdown script (e.g. via upfront_shutdown_script
// or an earlier shutdown), it must not change now.
4566 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4567 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4568 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4571 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4574 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4575 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4576 // any further commitment updates after we set LocalShutdownSent.
4577 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
// Lazily fetch our shutdown script from the signer if we never picked one; all fallible
// work must happen before the "may not fail" point below.
4579 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4582 assert!(send_shutdown);
4583 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4584 Ok(scriptpubkey) => scriptpubkey,
4585 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4587 if !shutdown_scriptpubkey.is_compatible(their_features) {
4588 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4590 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4595 // From here on out, we may not fail!
4597 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4598 self.context.update_time_counter += 1;
// Record our (newly chosen) shutdown script in the monitor so it persists across restart.
4600 let monitor_update = if update_shutdown_script {
4601 self.context.latest_monitor_update_id += 1;
4602 let monitor_update = ChannelMonitorUpdate {
4603 update_id: self.context.latest_monitor_update_id,
4604 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4605 scriptpubkey: self.get_closing_scriptpubkey(),
4608 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4609 self.push_ret_blockable_mon_update(monitor_update)
4611 let shutdown = if send_shutdown {
4612 Some(msgs::Shutdown {
4613 channel_id: self.context.channel_id,
4614 scriptpubkey: self.get_closing_scriptpubkey(),
4618 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4619 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4620 // cell HTLCs and return them to fail the payment.
4621 self.context.holding_cell_update_fee = None;
4622 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4623 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4625 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4626 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4633 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4634 self.context.update_time_counter += 1;
4636 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
// Assembles the fully-signed cooperative closing transaction by filling in the witness
// of the (single) funding input: the empty CHECKMULTISIG dummy, both signatures, and
// the funding redeemscript.
// NOTE(review): the trailing lines of this method (returning `tx`) are not visible in
// this chunk.
4639 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4640 let mut tx = closing_tx.trust().built_transaction().clone();
4642 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4644 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4645 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
// DER-encode each signature and append the SIGHASH_ALL byte, as required for script sigs.
4646 let mut holder_sig = sig.serialize_der().to_vec();
4647 holder_sig.push(EcdsaSighashType::All as u8);
4648 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4649 cp_sig.push(EcdsaSighashType::All as u8);
// Order the signatures by lexicographic comparison of the serialized funding pubkeys —
// presumably mirroring the key order in the 2-of-2 funding redeemscript (see
// make_funding_redeemscript); TODO confirm against that helper.
4650 if funding_key[..] < counterparty_funding_key[..] {
4651 tx.input[0].witness.push(holder_sig);
4652 tx.input[0].witness.push(cp_sig);
4654 tx.input[0].witness.push(cp_sig);
4655 tx.input[0].witness.push(holder_sig);
// Last witness element is the witness script itself (the funding redeemscript).
4658 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
// Handles an inbound `closing_signed` message: validates state and the peer's signature
// over the proposed closing transaction, then either accepts the proposed fee (producing
// a broadcastable signed transaction and a ShutdownResult) or counter-proposes a fee via
// the `propose_fee!` macro. Supports both the modern fee_range negotiation and the legacy
// converge-by-alternating style.
// NOTE(review): numerous interior lines (else-arms, closing braces, some match arms) are
// not visible in this chunk; comments below describe only the visible logic.
4662 pub fn closing_signed<F: Deref>(
4663 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4664 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4665 where F::Target: FeeEstimator
// closing_signed is only valid once both sides have exchanged `shutdown`.
4667 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4668 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4670 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4671 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4673 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4674 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4676 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4677 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
// As the funder (outbound side), we must have proposed the first closing fee ourselves.
4680 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4681 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
// While a monitor update is in flight, stash the message and retry after it completes.
4684 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4685 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4686 return Ok((None, None, None));
4689 let funding_redeemscript = self.context.get_funding_redeemscript();
4690 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4691 if used_total_fee != msg.fee_satoshis {
4692 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4694 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
// Verify the peer's signature; on failure, retry against the variant of the closing tx
// with their output skipped (they may consider it dust).
4696 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4699 // The remote end may have decided to revoke their output due to inconsistent dust
4700 // limits, so check for that case by re-checking the signature here.
4701 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4702 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4703 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
// Refuse non-segwit outputs below the standardness dust limit — such a tx won't relay.
4707 for outp in closing_tx.trust().built_transaction().output.iter() {
4708 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4709 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4713 assert!(self.context.shutdown_scriptpubkey.is_some());
// If the peer echoed back the exact fee we last proposed, negotiation has converged:
// produce the final signed tx and mark the channel ShutdownComplete.
4714 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4715 if last_fee == msg.fee_satoshis {
4716 let shutdown_result = ShutdownResult {
4717 monitor_update: None,
4718 dropped_outbound_htlcs: Vec::new(),
4719 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4720 channel_id: self.context.channel_id,
4721 counterparty_node_id: self.context.counterparty_node_id,
4723 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4724 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4725 self.context.update_time_counter += 1;
4726 return Ok((None, Some(tx), Some(shutdown_result)));
4730 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
// Signs and returns a counter-proposal (or, if $new_fee matches the peer's proposal,
// the final signed transaction plus shutdown result). Note this macro `return`s.
4732 macro_rules! propose_fee {
4733 ($new_fee: expr) => {
4734 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4735 (closing_tx, $new_fee)
4737 self.build_closing_transaction($new_fee, false)
4740 return match &self.context.holder_signer {
4741 ChannelSignerType::Ecdsa(ecdsa) => {
4743 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4744 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4745 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4746 let shutdown_result = ShutdownResult {
4747 monitor_update: None,
4748 dropped_outbound_htlcs: Vec::new(),
4749 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4750 channel_id: self.context.channel_id,
4751 counterparty_node_id: self.context.counterparty_node_id,
4753 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4754 self.context.update_time_counter += 1;
4755 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4756 (Some(tx), Some(shutdown_result))
4761 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4762 Ok((Some(msgs::ClosingSigned {
4763 channel_id: self.context.channel_id,
4764 fee_satoshis: used_fee,
4766 fee_range: Some(msgs::ClosingSignedFeeRange {
4767 min_fee_satoshis: our_min_fee,
4768 max_fee_satoshis: our_max_fee,
4770 }), signed_tx, shutdown_result))
4772 // TODO (taproot|arik)
// Modern negotiation: the peer supplied an acceptable fee range; intersect with ours.
4779 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4780 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4781 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4783 if max_fee_satoshis < our_min_fee {
4784 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4786 if min_fee_satoshis > our_max_fee {
4787 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4790 if !self.context.is_outbound() {
4791 // They have to pay, so pick the highest fee in the overlapping range.
4792 // We should never set an upper bound aside from their full balance
4793 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4794 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4796 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4797 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4798 msg.fee_satoshis, our_min_fee, our_max_fee)));
4800 // The proposed fee is in our acceptable range, accept it and broadcast!
4801 propose_fee!(msg.fee_satoshis);
4804 // Old fee style negotiation. We don't bother to enforce whether they are complying
4805 // with the "making progress" requirements, we just comply and hope for the best.
4806 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4807 if msg.fee_satoshis > last_fee {
4808 if msg.fee_satoshis < our_max_fee {
4809 propose_fee!(msg.fee_satoshis);
4810 } else if last_fee < our_max_fee {
4811 propose_fee!(our_max_fee);
4813 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4816 if msg.fee_satoshis > our_min_fee {
4817 propose_fee!(msg.fee_satoshis);
4818 } else if last_fee > our_min_fee {
4819 propose_fee!(our_min_fee);
4821 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
// No prior proposal from us: clamp the peer's suggestion into [our_min_fee, our_max_fee].
4825 if msg.fee_satoshis < our_min_fee {
4826 propose_fee!(our_min_fee);
4827 } else if msg.fee_satoshis > our_max_fee {
4828 propose_fee!(our_max_fee);
4830 propose_fee!(msg.fee_satoshis);
// Checks a to-be-forwarded HTLC against a specific `ChannelConfig`: the inbound amount
// must cover the outbound amount plus our forwarding fee, and the inbound CLTV must
// leave at least `cltv_expiry_delta` blocks over the outgoing expiry. On failure returns
// a (message, BOLT-4 failure code) pair.
// NOTE(review): the `return Err((...))` wrapper lines are not visible in this chunk.
4836 fn internal_htlc_satisfies_config(
4837 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4838 ) -> Result<(), (&'static str, u16)> {
// fee = base_msat + amt * proportional_millionths / 1_000_000, with checked arithmetic
// so an overflow yields None (treated below as insufficient fee).
4839 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4840 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4841 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4842 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4844 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4845 0x1000 | 12, // fee_insufficient
// Widen to u64 before adding so the expiry check itself cannot overflow u32.
4848 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4850 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4851 0x1000 | 13, // incorrect_cltv_expiry
4857 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4858 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4859 /// unsuccessful, falls back to the previous one if one exists.
// NOTE(review): the combinator chaining the two checks (presumably `.or_else(...)`) is
// not visible in this chunk — TODO confirm against the full file.
4860 pub fn htlc_satisfies_config(
4861 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4862 ) -> Result<(), (&'static str, u16)> {
4863 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
// Fall back to the previous config (config updates are applied with a grace period).
4865 if let Some(prev_config) = self.context.prev_config() {
4866 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
// Returns the next holder commitment number to be signed (stored counter counts down,
// hence the +1 to report the not-yet-revoked one).
4873 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4874 self.context.cur_holder_commitment_transaction_number + 1
// Returns the current counterparty commitment number, adjusting by one while we are
// awaiting their revoke_and_ack (AwaitingRemoteRevoke set).
4877 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4878 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
// Returns the most recently revoked counterparty commitment number (two ahead of the
// stored countdown counter).
4881 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4882 self.context.cur_counterparty_commitment_transaction_number + 2
// Borrows the holder's channel signer.
4886 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4887 &self.context.holder_signer
// Builds a snapshot of the channel's value accounting (all figures in msat, converted
// from sat where the underlying field is sat-denominated).
// NOTE(review): some interior lines (struct literal punctuation, match arms of the
// holding-cell sum) are not visible in this chunk.
4891 pub fn get_value_stat(&self) -> ChannelValueStat {
4893 value_to_self_msat: self.context.value_to_self_msat,
4894 channel_value_msat: self.context.channel_value_satoshis * 1000,
4895 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4896 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4897 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
// Sum only the AddHTLC entries sitting in the holding cell.
4898 holding_cell_outbound_amount_msat: {
4900 for h in self.context.holding_cell_htlc_updates.iter() {
4902 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4910 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4911 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4915 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4916 /// Allowed in any state (including after shutdown)
// Simple flag test on MonitorUpdateInProgress.
4917 pub fn is_awaiting_monitor_update(&self) -> bool {
4918 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4921 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4922 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4923 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
// With blocked updates queued, the latest released ID is one less than the first
// blocked update's ID (update IDs are assigned sequentially).
4924 self.context.blocked_monitor_updates[0].update.update_id - 1
4927 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4928 /// further blocked monitor update exists after the next.
// Pops the head of the blocked-updates queue (FIFO order preserved via remove(0)).
4929 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4930 if self.context.blocked_monitor_updates.is_empty() { return None; }
4931 Some((self.context.blocked_monitor_updates.remove(0).update,
4932 !self.context.blocked_monitor_updates.is_empty()))
4935 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4936 /// immediately given to the user for persisting or `None` if it should be held as blocked.
// NOTE(review): the release path (returning Some(update)) is not visible in this chunk;
// the visible logic queues the update whenever earlier updates are already blocked, to
// preserve the monitor-update ordering invariant.
4937 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4938 -> Option<ChannelMonitorUpdate> {
4939 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4940 if !release_monitor {
4941 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
// Number of monitor updates currently held back in the blocked queue.
4950 pub fn blocked_monitor_updates_pending(&self) -> usize {
4951 self.context.blocked_monitor_updates.len()
4954 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4955 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4956 /// transaction. If the channel is inbound, this implies simply that the channel has not
// NOTE(review): the continuation of this doc comment and several `return` lines are not
// visible in this chunk.
4958 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4959 if !self.is_awaiting_monitor_update() { return false; }
// Non-0conf case: state is exactly FundingSent once incidental flags are masked off.
4960 if self.context.channel_state &
4961 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4962 == ChannelState::FundingSent as u32 {
4963 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4964 // FundingSent set, though our peer could have sent their channel_ready.
4965 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4968 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4969 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4970 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4971 // waiting for the initial monitor persistence. Thus, we check if our commitment
4972 // transaction numbers have both been iterated only exactly once (for the
4973 // funding_signed), and we're awaiting monitor update.
4975 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4976 // only way to get an awaiting-monitor-update state during initial funding is if the
4977 // initial monitor persistence is still pending).
4979 // Because deciding we're awaiting initial broadcast spuriously could result in
4980 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4981 // we hard-assert here, even in production builds.
4982 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4983 assert!(self.context.monitor_pending_channel_ready);
4984 assert_eq!(self.context.latest_monitor_update_id, 0);
4990 /// Returns true if our channel_ready has been sent
// True either via the explicit OurChannelReady flag or because the base state has
// already advanced to ChannelReady or beyond (flags masked out via !STATE_FLAGS).
4991 pub fn is_our_channel_ready(&self) -> bool {
4992 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4995 /// Returns true if our peer has either initiated or agreed to shut down the channel.
// Tests the RemoteShutdownSent state flag.
4996 pub fn received_shutdown(&self) -> bool {
4997 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
5000 /// Returns true if we either initiated or agreed to shut down the channel.
// Tests the LocalShutdownSent state flag.
5001 pub fn sent_shutdown(&self) -> bool {
5002 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
5005 /// Returns true if this channel is fully shut down. True here implies that no further actions
5006 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5007 /// will be handled appropriately by the chain monitor.
// NOTE(review): the closing/return lines are not visible in this chunk. The assert
// enforces the invariant that ShutdownComplete is never combined with other flags.
5008 pub fn is_shutdown(&self) -> bool {
5009 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
5010 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
// Returns the current gossip channel_update broadcast status.
5015 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5016 self.context.channel_update_status
// Sets the channel_update status, bumping update_time_counter so a fresh channel_update
// (with a new timestamp) can be generated.
5019 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5020 self.context.update_time_counter += 1;
5021 self.context.channel_update_status = status;
// Checks whether the funding transaction has enough confirmations at `height` to send
// `channel_ready`, advancing channel_state accordingly, and returns the message to send
// (if any). Returns None when not yet confirmed deep enough, when we already sent it,
// or when the send must wait (signer pending, peer disconnected, monitor in progress).
// NOTE(review): several `return None` / brace lines are not visible in this chunk.
5024 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5026 // * always when a new block/transactions are confirmed with the new height
5027 // * when funding is signed with a height of 0
// No confirmation recorded and not a 0-conf channel: nothing to do yet.
5028 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
// Signed arithmetic so a reorg below the confirmation height yields <= 0.
5032 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5033 if funding_tx_confirmations <= 0 {
5034 self.context.funding_tx_confirmation_height = 0;
5037 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5041 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5042 // channel_ready yet.
5043 if self.context.signer_pending_funding {
5047 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5048 // channel_ready until the entire batch is ready.
5049 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// State transition: FundingSent -> +OurChannelReady, or (FundingSent|TheirChannelReady)
// -> ChannelReady once both sides are ready.
5050 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5051 self.context.channel_state |= ChannelState::OurChannelReady as u32;
5053 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5054 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5055 self.context.update_time_counter += 1;
5057 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5058 // We got a reorg but not enough to trigger a force close, just ignore.
5061 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
5062 // We should never see a funding transaction on-chain until we've received
5063 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5064 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5065 // however, may do this and we shouldn't treat it as a bug.
5066 #[cfg(not(fuzzing))]
5067 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5068 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5069 self.context.channel_state);
5071 // We got a reorg but not enough to trigger a force close, just ignore.
// Only actually emit channel_ready if no monitor update is in flight and the peer is
// connected; otherwise defer it via monitor_pending_channel_ready.
5075 if need_commitment_update {
5076 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5077 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5078 let next_per_commitment_point =
5079 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5080 return Some(msgs::ChannelReady {
5081 channel_id: self.context.channel_id,
5082 next_per_commitment_point,
5083 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5087 self.context.monitor_pending_channel_ready = true;
5093 /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5094 /// In the first case, we store the confirmation height and calculating the short channel id.
5095 /// In the second, we simply return an Err indicating we need to be force-closed now.
// NOTE(review): several interior lines (where-clause items, braces, Ok(msgs) return) are
// not visible in this chunk.
5096 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5097 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5098 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5099 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5101 NS::Target: NodeSigner,
5104 let mut msgs = (None, None);
5105 if let Some(funding_txo) = self.context.get_funding_txo() {
5106 for &(index_in_block, tx) in txdata.iter() {
5107 // Check if the transaction is the expected funding transaction, and if it is,
5108 // check that it pays the right amount to the right script.
5109 if self.context.funding_tx_confirmation_height == 0 {
5110 if tx.txid() == funding_txo.txid {
5111 let txo_idx = funding_txo.index as usize;
// Validate the funding output: index in range, correct P2WSH script, exact value.
5112 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5113 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5114 if self.context.is_outbound() {
5115 // If we generated the funding transaction and it doesn't match what it
5116 // should, the client is really broken and we should just panic and
5117 // tell them off. That said, because hash collisions happen with high
5118 // probability in fuzzing mode, if we're fuzzing we just close the
5119 // channel and move on.
5120 #[cfg(not(fuzzing))]
5121 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5123 self.context.update_time_counter += 1;
5124 let err_reason = "funding tx had wrong script/value or output index";
5125 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
// A malleable (non-segwit-signed) funding tx would let the txid change out from
// under our commitment transactions — hard error outside fuzzing.
5127 if self.context.is_outbound() {
5128 if !tx.is_coin_base() {
5129 for input in tx.input.iter() {
5130 if input.witness.is_empty() {
5131 // We generated a malleable funding transaction, implying we've
5132 // just exposed ourselves to funds loss to our counterparty.
5133 #[cfg(not(fuzzing))]
5134 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
// Record confirmation details and derive the real short channel id.
5139 self.context.funding_tx_confirmation_height = height;
5140 self.context.funding_tx_confirmed_in = Some(*block_hash);
5141 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5142 Ok(scid) => Some(scid),
5143 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5146 // If this is a coinbase transaction and not a 0-conf channel
5147 // we should update our min_depth to 100 to handle coinbase maturity
5148 if tx.is_coin_base() &&
5149 self.context.minimum_depth.unwrap_or(0) > 0 &&
5150 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5151 self.context.minimum_depth = Some(COINBASE_MATURITY);
5154 // If we allow 1-conf funding, we may need to check for channel_ready here and
5155 // send it immediately instead of waiting for a best_block_updated call (which
5156 // may have already happened for this block).
5157 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5158 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5159 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5160 msgs = (Some(channel_ready), announcement_sigs);
// Any confirmed tx spending the funding outpoint means the channel closed on-chain.
5163 for inp in tx.input.iter() {
5164 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5165 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5166 return Err(ClosureReason::CommitmentTxConfirmed);
5174 /// When a new block is connected, we check the height of the block against outbound holding
5175 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5176 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5177 /// handled by the ChannelMonitor.
5179 /// If we return Err, the channel may have been closed, at which point the standard
5180 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5183 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
// Thin public wrapper: delegates to do_best_block_updated with the signer context present.
5185 pub fn best_block_updated<NS: Deref, L: Deref>(
5186 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5187 node_signer: &NS, user_config: &UserConfig, logger: &L
5188 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5190 NS::Target: NodeSigner,
5193 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
// Core best-block handler: times out stale holding-cell HTLC adds, possibly emits
// channel_ready, and force-closes on funding un-confirmation or funding-confirmation
// timeout. `chain_node_signer` is None when invoked for a simulated reorg (see
// funding_transaction_unconfirmed), in which case no announcement_sigs are produced.
// NOTE(review): several interior lines (retain match arms, else-branches, braces) are
// not visible in this chunk.
5196 fn do_best_block_updated<NS: Deref, L: Deref>(
5197 &mut self, height: u32, highest_header_time: u32,
5198 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5199 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5201 NS::Target: NodeSigner,
5204 let mut timed_out_htlcs = Vec::new();
5205 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5206 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5208 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
// Drop holding-cell adds whose CLTV is too close to expiry; they are returned to the
// caller to be failed back.
5209 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5211 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5212 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5213 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
// Monotonically advance the time used for channel_update timestamps.
5221 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5223 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5224 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5225 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5227 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5228 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5231 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
// Once we have sent channel_ready (or are fully ready), watch for the funding tx
// becoming unconfirmed and force-close if so.
5232 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5233 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5234 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5235 if self.context.funding_tx_confirmation_height == 0 {
5236 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5237 // zero if it has been reorged out, however in either case, our state flags
5238 // indicate we've already sent a channel_ready
5239 funding_tx_confirmations = 0;
5242 // If we've sent channel_ready (or have both sent and received channel_ready), and
5243 // the funding transaction has become unconfirmed,
5244 // close the channel and hope we can get the latest state on chain (because presumably
5245 // the funding transaction is at least still in the mempool of most nodes).
5247 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5248 // 0-conf channel, but not doing so may lead to the
5249 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5251 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5252 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5253 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5254 return Err(ClosureReason::ProcessingError { err: err_reason });
// Inbound channel whose funding never confirmed within the deadline: give up on it.
5256 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5257 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5258 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5259 // If funding_tx_confirmed_in is unset, the channel must not be active
5260 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5261 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5262 return Err(ClosureReason::FundingTimedOut);
5265 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5266 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5268 Ok((None, timed_out_htlcs, announcement_sigs))
5271 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5272 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5273 /// before the channel has reached channel_ready and we can just wait for more blocks.
5274 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5275 if self.context.funding_tx_confirmation_height != 0 {
5276 // We handle the funding disconnection by calling best_block_updated with a height one
5277 // below where our funding was connected, implying a reorg back to conf_height - 1.
5278 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5279 // We use the time field to bump the current time we set on channel updates if its
5280 // larger. If we don't know that time has moved forward, we can just set it to the last
5281 // time we saw and it will be ignored.
5282 let best_time = self.context.update_time_counter;
// No chain_node_signer is supplied here (None), so the call below cannot produce
// announcement_sigs for the rewound height.
5283 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5284 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
// At reorg_height the funding transaction has zero confirmations, so none of
// these outputs should ever be generated; assert that invariant.
5285 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5286 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5287 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5293 // We never learned about the funding confirmation anyway, just ignore
5298 // Methods to get unprompted messages to send to the remote end (or where we already returned
5299 // something in the handler for the message that prompted this message):
5301 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5302 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5303 /// directions). Should be used for both broadcasted announcements and in response to an
5304 /// AnnouncementSignatures message from the remote peer.
5306 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5309 /// This will only return ChannelError::Ignore upon failure.
5311 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5312 fn get_channel_announcement<NS: Deref>(
5313 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5314 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5315 if !self.context.config.announced_channel {
5316 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5318 if !self.context.is_usable() {
5319 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
// A short_channel_id only exists once the funding transaction is confirmed.
5322 let short_channel_id = self.context.get_short_channel_id()
5323 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5324 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5325 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5326 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
// Per BOLT 7, node_id_1 must be the lexicographically-lesser of the two node
// IDs; order both the node IDs and the funding keys accordingly.
5327 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5329 let msg = msgs::UnsignedChannelAnnouncement {
5330 features: channelmanager::provided_channel_features(&user_config),
5333 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5334 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5335 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5336 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5337 excess_data: Vec::new(),
/// Attempts to build an `announcement_signatures` message to send to our peer.
///
/// Returns `None` when the channel is not ready to be announced: insufficient
/// confirmations, channel not usable, peer disconnected, or signatures already sent.
/// On success, transitions `announcement_sigs_state` to `MessageSent`.
5343 fn get_announcement_sigs<NS: Deref, L: Deref>(
5344 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5345 best_block_height: u32, logger: &L
5346 ) -> Option<msgs::AnnouncementSignatures>
5348 NS::Target: NodeSigner,
// Require at least six confirmations (confirmation_height + 5 <= best_block_height)
// before announcing the channel.
5351 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5355 if !self.context.is_usable() {
5359 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5360 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
// Only ever send announcement_signatures once.
5364 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5368 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5369 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5372 log_trace!(logger, "{:?}", e);
// Sign the announcement with our node key; failure here is logged but does not
// close the channel - it simply won't be announced.
5376 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5378 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5383 match &self.context.holder_signer {
5384 ChannelSignerType::Ecdsa(ecdsa) => {
// Second signature is made with the funding key, proving control of the
// on-chain output being announced.
5385 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5387 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5392 let short_channel_id = match self.context.get_short_channel_id() {
5394 None => return None,
5397 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5399 Some(msgs::AnnouncementSignatures {
5400 channel_id: self.context.channel_id(),
5402 node_signature: our_node_sig,
5403 bitcoin_signature: our_bitcoin_sig,
5406 // TODO (taproot|arik)
5412 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5414 fn sign_channel_announcement<NS: Deref>(
5415 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5416 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
// We can only assemble a fully-signed channel_announcement once we've stored the
// counterparty's signatures (received via their announcement_signatures message).
5417 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5418 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5419 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
// Determine whether we are node_1 or node_2 from the unsigned announcement's
// ordering, so each signature is placed in the matching slot below.
5420 let were_node_one = announcement.node_id_1 == our_node_key;
5422 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5423 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5424 match &self.context.holder_signer {
5425 ChannelSignerType::Ecdsa(ecdsa) => {
5426 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5427 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5428 Ok(msgs::ChannelAnnouncement {
5429 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5430 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5431 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5432 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5433 contents: announcement,
5436 // TODO (taproot|arik)
5441 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5445 /// Processes an incoming announcement_signatures message, providing a fully-signed
5446 /// channel_announcement message which we can broadcast and storing our counterparty's
5447 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5448 pub fn announcement_signatures<NS: Deref>(
5449 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5450 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5451 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5452 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
// Gossip signatures commit to the double-SHA256 of the serialized announcement.
5454 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
// Verify both counterparty signatures before storing them; a bad signature is a
// protocol violation and closes the channel (ChannelError::Close).
5456 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5457 return Err(ChannelError::Close(format!(
5458 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5459 &announcement, self.context.get_counterparty_node_id())));
5461 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5462 return Err(ChannelError::Close(format!(
5463 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5464 &announcement, self.context.counterparty_funding_pubkey())));
// Store the now-verified signatures even if we can't announce yet, so the
// announcement can be assembled later once we have enough confirmations.
5467 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5468 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5469 return Err(ChannelError::Ignore(
5470 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5473 self.sign_channel_announcement(node_signer, announcement)
5476 /// Gets a signed channel_announcement for this channel, if we previously received an
5477 /// announcement_signatures from our counterparty.
5478 pub fn get_signed_channel_announcement<NS: Deref>(
5479 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5480 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
// Don't announce before the funding transaction has at least six confirmations
// (confirmation_height + 5 <= best_block_height).
5481 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5484 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5486 Err(_) => return None,
// sign_channel_announcement fails (with Ignore) if we never received the
// counterparty's announcement_signatures; map that to None here.
5488 match self.sign_channel_announcement(node_signer, announcement) {
5489 Ok(res) => Some(res),
5494 /// May panic if called on a channel that wasn't immediately-previously
5495 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5496 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
// Caller contract: the peer must be marked disconnected and at least one remote
// commitment must have been exchanged, otherwise these asserts fire.
5497 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5498 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5499 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5500 // current to_remote balances. However, it no longer has any use, and thus is now simply
5501 // set to a dummy (but valid, as required by the spec) public key.
5502 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5503 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5504 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5505 let mut pk = [2; 33]; pk[1] = 0xff;
5506 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
// Provide the data_loss_protect fields: reveal the last per-commitment secret the
// counterparty gave us, if any commitment beyond the initial one was revoked.
5507 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5508 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5509 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5512 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5515 self.mark_awaiting_response();
5516 msgs::ChannelReestablish {
5517 channel_id: self.context.channel_id(),
5518 // The protocol has two different commitment number concepts - the "commitment
5519 // transaction number", which starts from 0 and counts up, and the "revocation key
5520 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5521 // commitment transaction numbers by the index which will be used to reveal the
5522 // revocation key for that commitment transaction, which means we have to convert them
5523 // to protocol-level commitment numbers here...
5525 // next_local_commitment_number is the next commitment_signed number we expect to
5526 // receive (indicating if they need to resend one that we missed).
5527 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5528 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5529 // receive, however we track it by the next commitment number for a remote transaction
5530 // (which is one further, as they always revoke previous commitment transaction, not
5531 // the one we send) so we have to decrement by 1. Note that if
5532 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5533 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5535 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5536 your_last_per_commitment_secret: remote_last_secret,
5537 my_current_per_commitment_point: dummy_pubkey,
5538 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5539 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5540 // txid of that interactive transaction, else we MUST NOT set it.
5541 next_funding_txid: None,
5546 // Send stuff to our remote peers:
5548 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5549 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5550 /// commitment update.
5552 /// `Err`s will only be [`ChannelError::Ignore`].
5553 pub fn queue_add_htlc<F: Deref, L: Deref>(
5554 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5555 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5556 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5557 ) -> Result<(), ChannelError>
5558 where F::Target: FeeEstimator, L::Target: Logger
// Pass force_holding_cell=true so send_htlc queues the HTLC rather than
// generating an update_add_htlc message; thus msg_opt must always be None.
5561 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5562 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5563 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
// Queueing must only ever fail with ChannelError::Ignore; any other error
// variant here indicates a bug (debug-asserted below).
5565 if let ChannelError::Ignore(_) = err { /* fine */ }
5566 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5571 /// Adds a pending outbound HTLC to this channel, note that you probably want
5572 /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5574 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5576 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5577 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5579 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5580 /// we may not yet have sent the previous commitment update messages and will need to
5581 /// regenerate them.
5583 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5584 /// on this [`Channel`] if `force_holding_cell` is false.
5586 /// `Err`s will only be [`ChannelError::Ignore`].
5587 fn send_htlc<F: Deref, L: Deref>(
5588 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5589 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5590 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5591 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5592 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5593 where F::Target: FeeEstimator, L::Target: Logger
// The channel must be fully established (ChannelReady) and neither side may have
// begun shutdown.
5595 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5596 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5598 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5599 if amount_msat > channel_total_msat {
5600 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5603 if amount_msat == 0 {
5604 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
// Enforce the per-HTLC min/max derived from current balances and fee state.
5607 let available_balances = self.context.get_available_balances(fee_estimator);
5608 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5609 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5610 available_balances.next_outbound_htlc_minimum_msat)));
5613 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5614 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5615 available_balances.next_outbound_htlc_limit_msat)));
5618 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5619 // Note that this should never really happen, if we're !is_live() on receipt of an
5620 // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5621 // the user to send directly into a !is_live() channel. However, if we
5622 // disconnected during the time the previous hop was doing the commitment dance we may
5623 // end up getting here after the forwarding delay. In any case, returning an
5624 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5625 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
// If we're waiting on a revoke_and_ack or a monitor update, the HTLC must go
// into the holding cell instead of being sent immediately.
5628 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5629 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5630 payment_hash, amount_msat,
5631 if force_holding_cell { "into holding cell" }
5632 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5633 else { "to peer" });
5635 if need_holding_cell {
5636 force_holding_cell = true;
5639 // Now update local state:
5640 if force_holding_cell {
// Holding-cell path: queue the add and return no message to send.
5641 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5646 onion_routing_packet,
// Direct path: record the HTLC as locally announced and build the
// update_add_htlc message for the peer.
5653 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5654 htlc_id: self.context.next_holder_htlc_id,
5656 payment_hash: payment_hash.clone(),
5658 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5664 let res = msgs::UpdateAddHTLC {
5665 channel_id: self.context.channel_id,
5666 htlc_id: self.context.next_holder_htlc_id,
5670 onion_routing_packet,
// Consume an HTLC id regardless of which path was taken above.
5674 self.context.next_holder_htlc_id += 1;
/// Builds a new counterparty commitment transaction, promoting HTLC/fee state as part of
/// sending a fresh `commitment_signed`, and returns the [`ChannelMonitorUpdate`] carrying
/// the new commitment info. Also sets the `AwaitingRemoteRevoke` state flag.
5679 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5680 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5681 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5682 // fail to generate this, we still are at least at a position where upgrading their status
// Promote inbound HTLCs: AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke.
5684 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5685 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5686 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5688 if let Some(state) = new_state {
5689 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
// Promote outbound HTLCs: AwaitingRemoteRevokeToRemove -> AwaitingRemovedRemoteRevoke.
5693 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5694 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5695 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5696 // Grab the preimage, if it exists, instead of cloning
5697 let mut reason = OutboundHTLCOutcome::Success(None);
5698 mem::swap(outcome, &mut reason);
5699 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
// Commit an inbound fee update that was waiting on this commitment.
5702 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5703 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5704 debug_assert!(!self.context.is_outbound());
5705 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5706 self.context.feerate_per_kw = feerate;
5707 self.context.pending_update_fee = None;
5710 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5712 let (mut htlcs_ref, counterparty_commitment_tx) =
5713 self.build_commitment_no_state_update(logger);
5714 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
// Box the HTLC sources for storage in the monitor update.
5715 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5716 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5718 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5719 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
// Record the new counterparty commitment info in a monitor update.
5722 self.context.latest_monitor_update_id += 1;
5723 let monitor_update = ChannelMonitorUpdate {
5724 update_id: self.context.latest_monitor_update_id,
5725 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5726 commitment_txid: counterparty_commitment_txid,
5727 htlc_outputs: htlcs.clone(),
5728 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5729 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5730 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5731 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5732 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5735 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
/// Builds the counterparty's commitment transaction for the current channel state
/// without mutating any state, returning the included HTLCs (with their sources)
/// alongside the transaction itself.
5739 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5740 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5741 where L::Target: Logger
5743 let counterparty_keys = self.context.build_remote_transaction_keys();
5744 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5745 let counterparty_commitment_tx = commitment_stats.tx;
// Test/fuzz-only fee sanity check: if a projected remote commitment fee was
// cached and the channel state still matches, assert the fee is as predicted.
5747 #[cfg(any(test, fuzzing))]
5749 if !self.context.is_outbound() {
5750 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5751 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5752 if let Some(info) = projected_commit_tx_info {
5753 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5754 if info.total_pending_htlcs == total_pending_htlcs
5755 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5756 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5757 && info.feerate == self.context.feerate_per_kw {
5758 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5759 assert_eq!(actual_fee, info.fee);
5765 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5768 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5769 /// generation when we shouldn't change HTLC/channel state.
5770 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5771 // Get the fee tests from `build_commitment_no_state_update`
5772 #[cfg(any(test, fuzzing))]
5773 self.build_commitment_no_state_update(logger);
5775 let counterparty_keys = self.context.build_remote_transaction_keys();
5776 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5777 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5779 match &self.context.holder_signer {
5780 ChannelSignerType::Ecdsa(ecdsa) => {
5781 let (signature, htlc_signatures);
5784 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5785 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
// Sign the counterparty's commitment tx (and its HTLC txs); a signer failure
// surfaces as ChannelError::Ignore, leaving channel state untouched.
5789 let res = ecdsa.sign_counterparty_commitment(
5790 &commitment_stats.tx,
5791 commitment_stats.inbound_htlc_preimages,
5792 commitment_stats.outbound_htlc_preimages,
5793 &self.context.secp_ctx,
5794 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5796 htlc_signatures = res.1;
5798 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5799 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5800 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5801 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
// Trace-log every signed HTLC transaction for debugging.
5803 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5804 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5805 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5806 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5807 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5808 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5812 Ok((msgs::CommitmentSigned {
5813 channel_id: self.context.channel_id,
5817 partial_signature_with_nonce: None,
5818 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5820 // TODO (taproot|arik)
5826 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5827 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5829 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5830 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5831 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5832 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5833 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5834 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5835 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5836 where F::Target: FeeEstimator, L::Target: Logger
// force_holding_cell=false: generate the update_add_htlc now when possible.
5838 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5839 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
// send_htlc must only fail with ChannelError::Ignore; anything else is a bug.
5840 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5843 let monitor_update = self.build_commitment_no_status_check(logger);
// Queue the monitor update and return it (possibly blocked) to the caller.
5844 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5845 Ok(self.push_ret_blockable_mon_update(monitor_update))
5851 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5853 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
// Capture the counterparty's advertised forwarding parameters from the update.
5854 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5855 fee_base_msat: msg.contents.fee_base_msat,
5856 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5857 cltv_expiry_delta: msg.contents.cltv_expiry_delta
// Report whether the stored forwarding info actually changed.
5859 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5861 self.context.counterparty_forwarding_info = new_forwarding_info;
5867 /// Begins the shutdown process, getting a message for the remote peer and returning all
5868 /// holding cell HTLCs for payment failure.
5870 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no
5871 /// [`ChannelMonitorUpdate`] will be returned).
5872 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5873 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5874 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
5876 for htlc in self.context.pending_outbound_htlcs.iter() {
5877 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5878 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5881 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
5882 if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
5883 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5885 else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
5886 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
5889 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
5890 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
5892 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
5893 if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
5894 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
5897 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
5898 // script is set, we just force-close and call it a day.
5899 let mut chan_closed = false;
5900 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5904 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5906 None if !chan_closed => {
5907 // use override shutdown script if provided
5908 let shutdown_scriptpubkey = match override_shutdown_script {
5909 Some(script) => script,
5911 // otherwise, use the shutdown scriptpubkey provided by the signer
5912 match signer_provider.get_shutdown_scriptpubkey() {
5913 Ok(scriptpubkey) => scriptpubkey,
5914 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
5918 if !shutdown_scriptpubkey.is_compatible(their_features) {
5919 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
5921 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5927 // From here on out, we may not fail!
5928 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
5929 let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
5930 let shutdown_result = ShutdownResult {
5931 monitor_update: None,
5932 dropped_outbound_htlcs: Vec::new(),
5933 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5934 channel_id: self.context.channel_id,
5935 counterparty_node_id: self.context.counterparty_node_id,
5937 self.context.channel_state = ChannelState::ShutdownComplete as u32;
5938 Some(shutdown_result)
5940 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
5943 self.context.update_time_counter += 1;
5945 let monitor_update = if update_shutdown_script {
5946 self.context.latest_monitor_update_id += 1;
5947 let monitor_update = ChannelMonitorUpdate {
5948 update_id: self.context.latest_monitor_update_id,
5949 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5950 scriptpubkey: self.get_closing_scriptpubkey(),
5953 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5954 self.push_ret_blockable_mon_update(monitor_update)
5956 let shutdown = msgs::Shutdown {
5957 channel_id: self.context.channel_id,
5958 scriptpubkey: self.get_closing_scriptpubkey(),
5961 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
5962 // our shutdown until we've committed all of the pending changes.
5963 self.context.holding_cell_update_fee = None;
5964 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5965 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5967 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5968 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5975 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
5976 "we can't both complete shutdown and return a monitor update");
5978 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
/// Returns an iterator over all in-flight outbound HTLCs we are responsible for resolving:
/// both those still queued in the holding cell and those already committed in
/// `pending_outbound_htlcs`.
pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
	// HTLCs waiting in the holding cell first; only `AddHTLC` updates represent
	// outbound HTLCs, so other update kinds are dropped by the flat_map.
	self.context.holding_cell_htlc_updates.iter()
		.flat_map(|htlc_update| {
			HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
				=> Some((source, payment_hash)),
		// ...followed by HTLCs already committed to the channel state.
		.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// Channel state/configuration shared with funded channels; moved into a full
	// `Channel` once the funding flow completes (see `get_funding_created`).
	pub context: ChannelContext<SP>,
	// Bookkeeping that only applies while the channel remains unfunded, e.g. how
	// many timer ticks it has been awaiting funding.
	pub unfunded_context: UnfundedChannelContext,
6000 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
/// Constructs a new outbound (channel-initiator) channel, to later be announced to the
/// peer via an `open_channel` message (see `get_open_channel`).
///
/// Validates the requested `channel_value_satoshis`/`push_msat` and our configuration
/// against protocol and implementation limits, derives fresh channel keys from
/// `signer_provider`, and builds the initial `ChannelContext` in the `OurInitSent`
/// state at the initial commitment number. No messages are sent here.
///
/// Returns an `APIError` describing the first violated constraint on failure.
pub fn new<ES: Deref, F: Deref>(
	fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
	channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
	outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
) -> Result<OutboundV1Channel<SP>, APIError>
where ES::Target: EntropySource,
	F::Target: FeeEstimator
	let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
	// Derive this channel's key set; the first argument is `false` for outbound
	// channels (the inbound constructor passes `true`).
	let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
	let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
	let pubkeys = holder_signer.pubkeys().clone();
	// Channels above the pre-wumbo cap require the peer to advertise wumbo support.
	if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
		return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
	if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
		return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
	let channel_value_msat = channel_value_satoshis * 1000;
	// We cannot push more to the peer than the channel contains.
	if push_msat > channel_value_msat {
		return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
	if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
		return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
	let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
	if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
		// Protocol level safety check in place, although it should never happen because
		// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
		return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
	let channel_type = Self::get_initial_channel_type(&config, their_features);
	debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
	// Anchor channels reserve value for the two anchor outputs and use a distinct
	// fee-estimation target.
	let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
		(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		(ConfirmationTarget::NonAnchorChannelFee, 0)
	let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
	// Ensure our initial balance can at least cover the commitment tx fee (plus any
	// anchor output value) for a minimal number of HTLCs.
	let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
	let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
	if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
		return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
	let mut secp_ctx = Secp256k1::new();
	// Randomize the secp context to harden against side-channel attacks.
	secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
	// Optionally commit to an upfront shutdown script, per our config.
	let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
		match signer_provider.get_shutdown_scriptpubkey() {
			Ok(scriptpubkey) => Some(scriptpubkey),
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
	if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
		if !shutdown_scriptpubkey.is_compatible(&their_features) {
			return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
	let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
		Ok(script) => script,
		Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
	// Use the caller-provided temporary id if any, otherwise generate a random one.
	let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
		context: ChannelContext {
			config: LegacyChannelConfig {
				options: config.channel_config.clone(),
				announced_channel: config.channel_handshake_config.announced_channel,
				commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
			// Until accept_channel, enforce our handshake limits on the peer's reply.
			inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
			// The channel is identified by the temporary id until the funding outpoint
			// is known (see `get_funding_created`).
			channel_id: temporary_channel_id,
			temporary_channel_id: Some(temporary_channel_id),
			channel_state: ChannelState::OurInitSent as u32,
			announcement_sigs_state: AnnouncementSigsState::NotSent,
			channel_value_satoshis,
			latest_monitor_update_id: 0,
			holder_signer: ChannelSignerType::Ecdsa(holder_signer),
			shutdown_scriptpubkey,
			// Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER.
			cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
			pending_inbound_htlcs: Vec::new(),
			pending_outbound_htlcs: Vec::new(),
			holding_cell_htlc_updates: Vec::new(),
			pending_update_fee: None,
			holding_cell_update_fee: None,
			next_holder_htlc_id: 0,
			next_counterparty_htlc_id: 0,
			update_time_counter: 1,
			resend_order: RAACommitmentOrder::CommitmentFirst,
			monitor_pending_channel_ready: false,
			monitor_pending_revoke_and_ack: false,
			monitor_pending_commitment_signed: false,
			monitor_pending_forwards: Vec::new(),
			monitor_pending_failures: Vec::new(),
			monitor_pending_finalized_fulfills: Vec::new(),
			signer_pending_commitment_update: false,
			signer_pending_funding: false,
			// Debug-only trackers for the largest value ever claimed on each side's
			// commitment transaction.
			#[cfg(debug_assertions)]
			holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
			#[cfg(debug_assertions)]
			counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
			last_sent_closing_fee: None,
			pending_counterparty_closing_signed: None,
			expecting_peer_commitment_signed: false,
			closing_fee_limits: None,
			target_closing_feerate_sats_per_kw: None,
			funding_tx_confirmed_in: None,
			funding_tx_confirmation_height: 0,
			short_channel_id: None,
			channel_creation_height: current_chain_height,
			feerate_per_kw: commitment_feerate,
			// Counterparty-provided parameters stay zeroed/None until accept_channel.
			counterparty_dust_limit_satoshis: 0,
			holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
			counterparty_max_htlc_value_in_flight_msat: 0,
			holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
			counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
			holder_selected_channel_reserve_satoshis,
			counterparty_htlc_minimum_msat: 0,
			// A zero htlc_minimum_msat is not usable; bump it to 1 msat.
			holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
			counterparty_max_accepted_htlcs: 0,
			holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
			minimum_depth: None, // Filled in in accept_channel
			counterparty_forwarding_info: None,
			channel_transaction_parameters: ChannelTransactionParameters {
				holder_pubkeys: pubkeys,
				holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
				is_outbound_from_holder: true,
				counterparty_parameters: None,
				funding_outpoint: None,
				channel_type_features: channel_type.clone()
			funding_transaction: None,
			is_batch_funding: None,
			counterparty_cur_commitment_point: None,
			counterparty_prev_commitment_point: None,
			counterparty_node_id,
			counterparty_shutdown_scriptpubkey: None,
			commitment_secrets: CounterpartyCommitmentSecrets::new(),
			channel_update_status: ChannelUpdateStatus::Enabled,
			closing_signed_in_flight: false,
			announcement_sigs: None,
			#[cfg(any(test, fuzzing))]
			next_local_commitment_tx_fee_info_cached: Mutex::new(None),
			#[cfg(any(test, fuzzing))]
			next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
			workaround_lnd_bug_4006: None,
			sent_message_awaiting_response: None,
			latest_inbound_scid_alias: None,
			outbound_scid_alias,
			channel_pending_event_emitted: false,
			channel_ready_event_emitted: false,
			#[cfg(any(test, fuzzing))]
			historical_inbound_htlc_fulfills: HashSet::new(),
			blocked_monitor_updates: Vec::new(),
		unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
/// a funding_created message for the remote peer.
/// Panics if called at some time other than immediately after initial handshake, if called twice,
/// or if called on an inbound channel.
/// Note that channel_id changes during this call!
/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
/// If an Err is returned, it is a ChannelError::Close.
pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
-> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
	if !self.context.is_outbound() {
		panic!("Tried to create outbound funding_created message on an inbound channel!");
	// Both init messages must have been exchanged and nothing more: any other state
	// means this was called too early or called twice.
	if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
		panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
	// No commitment state may have advanced before funding creation.
	if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
			self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
			self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
		panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
	self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
	self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
	// Now that we're past error-generating stuff, update our local state:
	self.context.channel_state = ChannelState::FundingCreated as u32;
	// The real channel id (derived from the funding outpoint) replaces the temporary
	// id used so far.
	self.context.channel_id = funding_txo.to_channel_id();
	// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
	// We can skip this if it is a zero-conf channel.
	if funding_transaction.is_coin_base() &&
			self.context.minimum_depth.unwrap_or(0) > 0 &&
			self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
		self.context.minimum_depth = Some(COINBASE_MATURITY);
	self.context.funding_transaction = Some(funding_transaction);
	self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
	let funding_created = self.context.get_funding_created_msg(logger);
	if funding_created.is_none() {
		// The signer did not produce the message yet; record that we are waiting so it
		// can be regenerated once the signature becomes available.
		if !self.context.signer_pending_funding {
			log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
			self.context.signer_pending_funding = true;
	// Promote the unfunded channel to a full `Channel`, consuming `self`.
	let channel = Channel {
		context: self.context,
	Ok((channel, funding_created))
/// Chooses the channel type we will propose in `open_channel`, based on our config
/// and the counterparty's advertised `InitFeatures`.
fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
	// The default channel type (ie the first one we try) depends on whether the channel is
	// public - if it is, we just go with `only_static_remotekey` as it's the only option
	// available. If it's private, we first try `scid_privacy` as it provides better privacy
	// with no other changes, and fall back to `only_static_remotekey`.
	let mut ret = ChannelTypeFeatures::only_static_remote_key();
	if !config.channel_handshake_config.announced_channel &&
		config.channel_handshake_config.negotiate_scid_privacy &&
		their_features.supports_scid_privacy() {
		ret.set_scid_privacy_required();
	// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
	// set it now. If they don't understand it, we'll fall back to our default of
	// `only_static_remotekey`.
	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
		their_features.supports_anchors_zero_fee_htlc_tx() {
		ret.set_anchors_zero_fee_htlc_tx_required();
/// If we receive an error message, it may only be a rejection of the channel type we tried,
/// not of our ability to open any channel at all. Thus, on error, we should first call this
/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
pub(crate) fn maybe_handle_error_without_close<F: Deref>(
	&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
) -> Result<msgs::OpenChannel, ()>
	F::Target: FeeEstimator
	// Only retry while we are still the un-acknowledged initiator of this channel.
	if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
	if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
		// We've exhausted our options
	// We support opening a few different types of channels. Try removing our additional
	// features one by one until we've either arrived at our default or the counterparty has
	// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
	// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
	// checks whether the counterparty supports every feature, this would only happen if the
	// counterparty is advertising the feature, but rejecting channels proposing the feature for
	if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
		self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
		// Without anchors, switch to the non-anchor fee-estimation target.
		self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
	} else if self.context.channel_type.supports_scid_privacy() {
		self.context.channel_type.clear_scid_privacy();
		self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
	// Keep the commitment transaction parameters in sync with the downgraded type.
	self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
	// Re-propose the channel with the reduced feature set.
	Ok(self.get_open_channel(chain_hash))
/// Builds the `open_channel` message announcing this channel to the counterparty.
///
/// Panics if called on an inbound channel, or once the handshake has progressed past
/// the `OurInitSent` state or the commitment number has advanced.
pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
	if !self.context.is_outbound() {
		panic!("Tried to open a channel for an inbound channel?");
	if self.context.channel_state != ChannelState::OurInitSent as u32 {
		panic!("Cannot generate an open_channel after we've moved forward");
	if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
		panic!("Tried to send an open_channel for a channel that has already advanced");
	let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
	let keys = self.context.get_holder_pubkeys();
		// `channel_id` still holds the temporary id at this stage.
		temporary_channel_id: self.context.channel_id,
		funding_satoshis: self.context.channel_value_satoshis,
		push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
		dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
		max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
		channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
		htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
		feerate_per_kw: self.context.feerate_per_kw as u32,
		to_self_delay: self.context.get_holder_selected_contest_delay(),
		max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
		funding_pubkey: keys.funding_pubkey,
		revocation_basepoint: keys.revocation_basepoint.to_public_key(),
		payment_point: keys.payment_point,
		delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
		htlc_basepoint: keys.htlc_basepoint.to_public_key(),
		first_per_commitment_point,
		// The low bit of channel_flags signals whether we want the channel announced.
		channel_flags: if self.context.config.announced_channel {1} else {0},
		// An empty script opts out of upfront shutdown when we have none committed.
		shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
			Some(script) => script.clone().into_inner(),
			None => Builder::new().into_script(),
		channel_type: Some(self.context.channel_type.clone()),
/// Handles the counterparty's `accept_channel` reply to our `open_channel`, validating
/// its parameters against protocol rules and our configured handshake limits, then
/// recording the counterparty's channel parameters in our context.
///
/// On success the channel state becomes `OurInitSent | TheirInitSent`, ready for
/// funding creation. Any violated constraint yields a `ChannelError::Close`.
pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
	// Prefer the limits snapshotted when we opened over the caller-supplied defaults.
	let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
	// Check sanity of message fields:
	if !self.context.is_outbound() {
		return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
	if self.context.channel_state != ChannelState::OurInitSent as u32 {
		return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
	// Dust limit cannot exceed the total bitcoin supply (21M BTC in sats).
	if msg.dust_limit_satoshis > 21000000 * 100000000 {
		return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
	if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
		return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
	if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
		return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
	if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
		return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
			msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
	// The spendable portion of the channel must exceed their htlc_minimum_msat,
	// otherwise no HTLC could ever be sent.
	let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
	if msg.htlc_minimum_msat >= full_channel_value_msat {
		return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
	let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
	if msg.to_self_delay > max_delay_acceptable {
		return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
	if msg.max_accepted_htlcs < 1 {
		return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
	if msg.max_accepted_htlcs > MAX_HTLCS {
		return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
	// Now check against optional parameters as set by config...
	if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
		return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
	if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
		return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
	if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
		return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
	if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
		return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
	if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
		return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
	if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
		return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
	if msg.minimum_depth > peer_limits.max_minimum_depth {
		return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
	// The channel type they echo back must match what we proposed.
	if let Some(ty) = &msg.channel_type {
		if *ty != self.context.channel_type {
			return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
	} else if their_features.supports_channel_type() {
		// Assume they've accepted the channel type as they said they understand it.
		let channel_type = ChannelTypeFeatures::from_init(&their_features);
		if channel_type != ChannelTypeFeatures::only_static_remote_key() {
			return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
		self.context.channel_type = channel_type.clone();
		self.context.channel_transaction_parameters.channel_type_features = channel_type;
	let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
		match &msg.shutdown_scriptpubkey {
			&Some(ref script) => {
				// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
				if script.len() == 0 {
				if !script::is_bolt2_compliant(&script, their_features) {
					return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
				Some(script.clone())
			// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
				return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
	// All checks passed; record the counterparty's parameters.
	self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
	// Cap their in-flight limit at the full channel value.
	self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
	self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
	self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
	self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
	// If we trust our own funding for 0-conf, accept whatever minimum depth they ask
	// (possibly zero); otherwise require at least one confirmation.
	if peer_limits.trust_own_funding_0conf {
		self.context.minimum_depth = Some(msg.minimum_depth);
		self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: msg.funding_pubkey,
		revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
		payment_point: msg.payment_point,
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
		htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
	self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
		selected_contest_delay: msg.to_self_delay,
		pubkeys: counterparty_pubkeys,
	self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
	self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
	// Handshake complete: both init messages have now been exchanged.
	self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
	self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	// Channel state/configuration shared with funded channels.
	pub context: ChannelContext<SP>,
	// Bookkeeping that only applies while the channel remains unfunded.
	pub unfunded_context: UnfundedChannelContext,
6500 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6501 /// Creates a new channel from a remote sides' request for one.
6502 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6503 pub fn new<ES: Deref, F: Deref, L: Deref>(
6504 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6505 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6506 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6507 current_chain_height: u32, logger: &L, is_0conf: bool,
6508 ) -> Result<InboundV1Channel<SP>, ChannelError>
6509 where ES::Target: EntropySource,
6510 F::Target: FeeEstimator,
6513 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6514 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6516 // First check the channel type is known, failing before we do anything else if we don't
6517 // support this channel type.
6518 let channel_type = if let Some(channel_type) = &msg.channel_type {
6519 if channel_type.supports_any_optional_bits() {
6520 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6523 // We only support the channel types defined by the `ChannelManager` in
6524 // `provided_channel_type_features`. The channel type must always support
6525 // `static_remote_key`.
6526 if !channel_type.requires_static_remote_key() {
6527 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6529 // Make sure we support all of the features behind the channel type.
6530 if !channel_type.is_subset(our_supported_features) {
6531 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6533 if channel_type.requires_scid_privacy() && announced_channel {
6534 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6536 channel_type.clone()
6538 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6539 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6540 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6545 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6546 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6547 let pubkeys = holder_signer.pubkeys().clone();
6548 let counterparty_pubkeys = ChannelPublicKeys {
6549 funding_pubkey: msg.funding_pubkey,
6550 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6551 payment_point: msg.payment_point,
6552 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6553 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6556 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6557 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6560 // Check sanity of message fields:
6561 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6562 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6564 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6565 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6567 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6568 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6570 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6571 if msg.push_msat > full_channel_value_msat {
6572 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6574 if msg.dust_limit_satoshis > msg.funding_satoshis {
6575 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6577 if msg.htlc_minimum_msat >= full_channel_value_msat {
6578 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6580 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6582 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6583 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6584 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6586 if msg.max_accepted_htlcs < 1 {
6587 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6589 if msg.max_accepted_htlcs > MAX_HTLCS {
6590 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6593 // Now check against optional parameters as set by config...
6594 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6595 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6597 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6598 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6600 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6601 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6603 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6604 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6606 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6607 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6609 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6610 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6612 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6613 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6616 // Convert things into internal flags and prep our state:
6618 if config.channel_handshake_limits.force_announced_channel_preference {
6619 if config.channel_handshake_config.announced_channel != announced_channel {
6620 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6624 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6625 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6626 // Protocol level safety check in place, although it should never happen because
6627 // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6628 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6630 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6631 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6633 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6634 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6635 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6637 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6638 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6641 // check if the funder's amount for the initial commitment tx is sufficient
6642 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6643 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6644 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6648 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6649 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6650 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6651 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6654 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6655 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6656 // want to push much to us), our counterparty should always have more than our reserve.
6657 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6658 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6661 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6662 match &msg.shutdown_scriptpubkey {
6663 &Some(ref script) => {
6664 // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6665 if script.len() == 0 {
6668 if !script::is_bolt2_compliant(&script, their_features) {
6669 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6671 Some(script.clone())
6674 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6676 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6681 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6682 match signer_provider.get_shutdown_scriptpubkey() {
6683 Ok(scriptpubkey) => Some(scriptpubkey),
6684 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6688 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6689 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6690 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6694 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6695 Ok(script) => script,
6696 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6699 let mut secp_ctx = Secp256k1::new();
6700 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6702 let minimum_depth = if is_0conf {
6705 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6709 context: ChannelContext {
6712 config: LegacyChannelConfig {
6713 options: config.channel_config.clone(),
6715 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6720 inbound_handshake_limits_override: None,
6722 temporary_channel_id: Some(msg.temporary_channel_id),
6723 channel_id: msg.temporary_channel_id,
6724 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
6725 announcement_sigs_state: AnnouncementSigsState::NotSent,
6728 latest_monitor_update_id: 0,
6730 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6731 shutdown_scriptpubkey,
6734 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6735 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6736 value_to_self_msat: msg.push_msat,
6738 pending_inbound_htlcs: Vec::new(),
6739 pending_outbound_htlcs: Vec::new(),
6740 holding_cell_htlc_updates: Vec::new(),
6741 pending_update_fee: None,
6742 holding_cell_update_fee: None,
6743 next_holder_htlc_id: 0,
6744 next_counterparty_htlc_id: 0,
6745 update_time_counter: 1,
6747 resend_order: RAACommitmentOrder::CommitmentFirst,
6749 monitor_pending_channel_ready: false,
6750 monitor_pending_revoke_and_ack: false,
6751 monitor_pending_commitment_signed: false,
6752 monitor_pending_forwards: Vec::new(),
6753 monitor_pending_failures: Vec::new(),
6754 monitor_pending_finalized_fulfills: Vec::new(),
6756 signer_pending_commitment_update: false,
6757 signer_pending_funding: false,
6759 #[cfg(debug_assertions)]
6760 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6761 #[cfg(debug_assertions)]
6762 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6764 last_sent_closing_fee: None,
6765 pending_counterparty_closing_signed: None,
6766 expecting_peer_commitment_signed: false,
6767 closing_fee_limits: None,
6768 target_closing_feerate_sats_per_kw: None,
6770 funding_tx_confirmed_in: None,
6771 funding_tx_confirmation_height: 0,
6772 short_channel_id: None,
6773 channel_creation_height: current_chain_height,
6775 feerate_per_kw: msg.feerate_per_kw,
6776 channel_value_satoshis: msg.funding_satoshis,
6777 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6778 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6779 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6780 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6781 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6782 holder_selected_channel_reserve_satoshis,
6783 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6784 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6785 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
6786 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6789 counterparty_forwarding_info: None,
6791 channel_transaction_parameters: ChannelTransactionParameters {
6792 holder_pubkeys: pubkeys,
6793 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6794 is_outbound_from_holder: false,
6795 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6796 selected_contest_delay: msg.to_self_delay,
6797 pubkeys: counterparty_pubkeys,
6799 funding_outpoint: None,
6800 channel_type_features: channel_type.clone()
6802 funding_transaction: None,
6803 is_batch_funding: None,
6805 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6806 counterparty_prev_commitment_point: None,
6807 counterparty_node_id,
6809 counterparty_shutdown_scriptpubkey,
6811 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6813 channel_update_status: ChannelUpdateStatus::Enabled,
6814 closing_signed_in_flight: false,
6816 announcement_sigs: None,
6818 #[cfg(any(test, fuzzing))]
6819 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6820 #[cfg(any(test, fuzzing))]
6821 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6823 workaround_lnd_bug_4006: None,
6824 sent_message_awaiting_response: None,
6826 latest_inbound_scid_alias: None,
6827 outbound_scid_alias: 0,
6829 channel_pending_event_emitted: false,
6830 channel_ready_event_emitted: false,
6832 #[cfg(any(test, fuzzing))]
6833 historical_inbound_htlc_fulfills: HashSet::new(),
6838 blocked_monitor_updates: Vec::new(),
6840 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6846 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6847 /// should be sent back to the counterparty node.
///
/// Panics if this is actually an outbound channel, or if the channel has progressed past the
/// initial `OurInitSent | TheirInitSent` handshake state (i.e. we already accepted it).
///
6849 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6850 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
// Accepting only makes sense for a channel the counterparty initiated.
6851 if self.context.is_outbound() {
6852 panic!("Tried to send accept_channel for an outbound channel?");
// We must still be in the very first handshake state: both init messages exchanged,
// nothing further.
6854 if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
6855 panic!("Tried to send accept_channel after channel had moved forward");
// The holder commitment number only moves off INITIAL_COMMITMENT_NUMBER once the channel
// has advanced past the handshake, which must not have happened yet.
6857 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6858 panic!("Tried to send an accept_channel for a channel that has already advanced");
// All sanity checks passed; build the actual message.
6861 self.generate_accept_channel_message()
6864 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6865 /// inbound channel. If the intention is to accept an inbound channel, use
6866 /// [`InboundV1Channel::accept_inbound_channel`] instead.
///
/// Note that, unlike `accept_inbound_channel`, this performs no channel-state sanity checks;
/// it simply snapshots our negotiated parameters and keys into the message.
///
6868 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6869 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
// Ask the signer for the per-commitment point of our first (initial) commitment tx.
6870 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6871 let keys = self.context.get_holder_pubkeys();
6873 msgs::AcceptChannel {
// Prior to funding, `channel_id` still holds the temporary id from open_channel.
6874 temporary_channel_id: self.context.channel_id,
6875 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6876 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6877 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6878 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
// `minimum_depth` is always Some for an inbound channel being accepted, so unwrap()
// should not fire here. NOTE(review): assumed from construction — confirm.
6879 minimum_depth: self.context.minimum_depth.unwrap(),
6880 to_self_delay: self.context.get_holder_selected_contest_delay(),
6881 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6882 funding_pubkey: keys.funding_pubkey,
6883 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6884 payment_point: keys.payment_point,
6885 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6886 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6887 first_per_commitment_point,
6888 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6889 Some(script) => script.clone().into_inner(),
// A zero-length script opts out of upfront shutdown (see the 0-length opt-out
// handling on the receive side above).
6890 None => Builder::new().into_script(),
6892 channel_type: Some(self.context.channel_type.clone()),
// NOTE(review): presumably only used for taproot channels and filled in elsewhere —
// confirm against the taproot signing path.
6894 next_local_nonce: None,
6898 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6899 /// inbound channel without accepting it.
///
/// Unlike [`Self::accept_inbound_channel`], this does not assert any channel-state
/// invariants and does not mark the channel as accepted.
///
6901 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6903 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
6904 self.generate_accept_channel_message()
/// Builds our initial holder commitment transaction and verifies the counterparty's
/// `funding_created` signature against it.
///
/// Returns the (unsigned) initial holder [`CommitmentTransaction`] on success so the caller
/// can wrap it into a [`HolderCommitmentTransaction`]; returns a `ChannelError` if the
/// signature does not verify.
6907 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6908 let funding_script = self.context.get_funding_redeemscript();
6910 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6911 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6912 let trusted_tx = initial_commitment_tx.trust();
6913 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
// Sighash commits to the 2-of-2 funding redeemscript and the full channel value.
6914 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6915 // They sign the holder commitment transaction...
6916 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6917 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6918 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6919 encode::serialize_hex(&funding_script), &self.context.channel_id());
// secp_check! converts an ECDSA verification failure into the given ChannelError::Close.
6920 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
6922 Ok(initial_commitment_tx)
/// Handles a counterparty `funding_created` message: records the funding outpoint, verifies
/// their signature on our initial commitment transaction, builds the initial
/// [`ChannelMonitor`], and promotes this `InboundV1Channel` into a funded [`Channel`].
///
/// On failure, `self` is handed back to the caller in the error tuple so the channel object
/// is not lost. The returned `funding_signed` is `Option` because the signer may be pending
/// (see the log message near the end).
6925 pub fn funding_created<L: Deref>(
6926 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
6927 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
// Only the channel initiator (the counterparty, here) sends funding_created.
6931 if self.context.is_outbound() {
6932 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
6934 if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
6935 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
6936 // remember the channel, so it's safe to just send an error_message here and drop the
6938 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
// Internal invariant: no commitment secrets seen and both commitment numbers still at
// their initial values — anything else is a bug, hence panic rather than error.
6940 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6941 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6942 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6943 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6946 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6947 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6948 // This is an externally observable change before we finish all our checks. In particular
6949 // check_funding_created_signature may fail.
6950 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6952 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
6954 Err(ChannelError::Close(e)) => {
// Roll back the externally observable funding-outpoint change noted above before
// handing the channel back to the caller.
6955 self.context.channel_transaction_parameters.funding_outpoint = None;
6956 return Err((self, ChannelError::Close(e)));
6959 // The only error we know how to handle is ChannelError::Close, so we fall over here
6960 // to make sure we don't continue with an inconsistent state.
6961 panic!("unexpected error type from check_funding_created_signature {:?}", e);
6965 let holder_commitment_tx = HolderCommitmentTransaction::new(
6966 initial_commitment_tx,
6969 &self.context.get_holder_pubkeys().funding_pubkey,
6970 self.context.counterparty_funding_pubkey()
// Give the signer a chance to veto the commitment (e.g. an external/remote signer).
6973 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
6974 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6977 // Now that we're past error-generating stuff, update our local state:
6979 self.context.channel_state = ChannelState::FundingSent as u32;
// The real channel id is derived from the funding outpoint, replacing the temporary id.
6980 self.context.channel_id = funding_txo.to_channel_id();
// Both sides have now exchanged signatures for commitment number INITIAL_COMMITMENT_NUMBER,
// so advance (decrement) both counters.
6981 self.context.cur_counterparty_commitment_transaction_number -= 1;
6982 self.context.cur_holder_commitment_transaction_number -= 1;
6984 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6986 let funding_redeemscript = self.context.get_funding_redeemscript();
6987 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
// NOTE(review): `obscure_factor` is computed here but its consumer is not visible in this
// chunk — presumably passed into ChannelMonitor::new on an elided argument line; confirm.
6988 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6989 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
// The monitor gets its own signer instance, configured with the same channel parameters.
6990 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6991 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6992 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6993 shutdown_script, self.context.get_holder_selected_contest_delay(),
6994 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6995 &self.context.channel_transaction_parameters,
6996 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6998 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6999 let logger_with_chan_monitor = WithChannelMonitor::from(logger, &channel_monitor);
// Seed the monitor with the counterparty's initial commitment tx. Note the `+ 1`: the
// counter was decremented above, so this refers back to the initial commitment number.
7000 channel_monitor.provide_initial_counterparty_commitment_tx(
7001 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7002 self.context.cur_counterparty_commitment_transaction_number + 1,
7003 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7004 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7005 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), &&logger_with_chan_monitor);
7007 log_info!(logger, "{} funding_signed for peer for channel {}",
7008 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7010 // Promote the channel to a full-fledged one now that we have updated the state and have a
7011 // `ChannelMonitor`.
7012 let mut channel = Channel {
7013 context: self.context,
7015 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7016 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7018 Ok((channel, funding_signed, channel_monitor))
// On-disk serialization format version written for `Channel`, and the minimum version we
// are able to read back. Both being equal means no cross-version read compatibility window
// at this point in history.
7022 const SERIALIZATION_VERSION: u8 = 3;
7023 const MIN_SERIALIZATION_VERSION: u8 = 3;
7025 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
// Serialized as a single byte: 0 = last announced as enabled, 1 = last announced as
// disabled. Staged (in-transition) states collapse to whichever side was last announced.
7031 impl Writeable for ChannelUpdateStatus {
7032 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7033 // We only care about writing out the current state as it was announced, ie only either
7034 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7035 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7037 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7038 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7039 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7040 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
// Inverse of the Writeable impl above: only the two announced states can be read back,
// since staged states were intentionally collapsed at write time.
7046 impl Readable for ChannelUpdateStatus {
7047 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7048 Ok(match <u8 as Readable>::read(reader)? {
7049 0 => ChannelUpdateStatus::Enabled,
7050 1 => ChannelUpdateStatus::Disabled,
// Any other byte value indicates corrupt or incompatible data.
7051 _ => return Err(DecodeError::InvalidValue),
// Serialized as a single byte: 0 = NotSent (including states that reset to NotSent on
// disconnect), 1 = PeerReceived.
7056 impl Writeable for AnnouncementSigsState {
7057 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7058 // We only care about writing out the current state as if we had just disconnected, at
7059 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7061 AnnouncementSigsState::NotSent => 0u8.write(writer),
7062 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7063 AnnouncementSigsState::Committed => 0u8.write(writer),
7064 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
// Inverse of the Writeable impl above: MessageSent/Committed were collapsed to NotSent at
// write time, so only NotSent and PeerReceived can be read back.
7069 impl Readable for AnnouncementSigsState {
7070 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7071 Ok(match <u8 as Readable>::read(reader)? {
7072 0 => AnnouncementSigsState::NotSent,
7073 1 => AnnouncementSigsState::PeerReceived,
// Any other byte value indicates corrupt or incompatible data.
7074 _ => return Err(DecodeError::InvalidValue),
7079 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7080 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7081 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7084 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7086 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7087 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7088 // the low bytes now and the optional high bytes later.
7089 let user_id_low = self.context.user_id as u64;
7090 user_id_low.write(writer)?;
7092 // Version 1 deserializers expected to read parts of the config object here. Version 2
7093 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7094 // `minimum_depth` we simply write dummy values here.
7095 writer.write_all(&[0; 8])?;
7097 self.context.channel_id.write(writer)?;
7098 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7099 self.context.channel_value_satoshis.write(writer)?;
7101 self.context.latest_monitor_update_id.write(writer)?;
7103 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7104 // deserialized from that format.
7105 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7106 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7107 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7109 self.context.destination_script.write(writer)?;
7111 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7112 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7113 self.context.value_to_self_msat.write(writer)?;
7115 let mut dropped_inbound_htlcs = 0;
7116 for htlc in self.context.pending_inbound_htlcs.iter() {
7117 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7118 dropped_inbound_htlcs += 1;
7121 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7122 for htlc in self.context.pending_inbound_htlcs.iter() {
7123 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7126 htlc.htlc_id.write(writer)?;
7127 htlc.amount_msat.write(writer)?;
7128 htlc.cltv_expiry.write(writer)?;
7129 htlc.payment_hash.write(writer)?;
7131 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7132 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7134 htlc_state.write(writer)?;
7136 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7138 htlc_state.write(writer)?;
7140 &InboundHTLCState::Committed => {
7143 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7145 removal_reason.write(writer)?;
7150 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7151 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7152 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7154 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7155 for htlc in self.context.pending_outbound_htlcs.iter() {
7156 htlc.htlc_id.write(writer)?;
7157 htlc.amount_msat.write(writer)?;
7158 htlc.cltv_expiry.write(writer)?;
7159 htlc.payment_hash.write(writer)?;
7160 htlc.source.write(writer)?;
7162 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7164 onion_packet.write(writer)?;
7166 &OutboundHTLCState::Committed => {
7169 &OutboundHTLCState::RemoteRemoved(_) => {
7170 // Treat this as a Committed because we haven't received the CS - they'll
7171 // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
7174 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7176 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7177 preimages.push(preimage);
7179 let reason: Option<&HTLCFailReason> = outcome.into();
7180 reason.write(writer)?;
7182 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7184 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7185 preimages.push(preimage);
7187 let reason: Option<&HTLCFailReason> = outcome.into();
7188 reason.write(writer)?;
7191 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7192 pending_outbound_blinding_points.push(htlc.blinding_point);
7195 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7196 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7197 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7198 for update in self.context.holding_cell_htlc_updates.iter() {
7200 &HTLCUpdateAwaitingACK::AddHTLC {
7201 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7202 blinding_point, skimmed_fee_msat,
7205 amount_msat.write(writer)?;
7206 cltv_expiry.write(writer)?;
7207 payment_hash.write(writer)?;
7208 source.write(writer)?;
7209 onion_routing_packet.write(writer)?;
7211 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7212 holding_cell_blinding_points.push(blinding_point);
7214 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7216 payment_preimage.write(writer)?;
7217 htlc_id.write(writer)?;
7219 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7221 htlc_id.write(writer)?;
7222 err_packet.write(writer)?;
7227 match self.context.resend_order {
7228 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7229 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7232 self.context.monitor_pending_channel_ready.write(writer)?;
7233 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7234 self.context.monitor_pending_commitment_signed.write(writer)?;
7236 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7237 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7238 pending_forward.write(writer)?;
7239 htlc_id.write(writer)?;
7242 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7243 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7244 htlc_source.write(writer)?;
7245 payment_hash.write(writer)?;
7246 fail_reason.write(writer)?;
7249 if self.context.is_outbound() {
7250 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7251 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7252 Some(feerate).write(writer)?;
7254 // As for inbound HTLCs, if the update was only announced and never committed in a
7255 // commitment_signed, drop it.
7256 None::<u32>.write(writer)?;
7258 self.context.holding_cell_update_fee.write(writer)?;
7260 self.context.next_holder_htlc_id.write(writer)?;
7261 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7262 self.context.update_time_counter.write(writer)?;
7263 self.context.feerate_per_kw.write(writer)?;
7265 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7266 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7267 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7268 // consider the stale state on reload.
7271 self.context.funding_tx_confirmed_in.write(writer)?;
7272 self.context.funding_tx_confirmation_height.write(writer)?;
7273 self.context.short_channel_id.write(writer)?;
7275 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7276 self.context.holder_dust_limit_satoshis.write(writer)?;
7277 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7279 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7280 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7282 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7283 self.context.holder_htlc_minimum_msat.write(writer)?;
7284 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7286 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7287 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7289 match &self.context.counterparty_forwarding_info {
7292 info.fee_base_msat.write(writer)?;
7293 info.fee_proportional_millionths.write(writer)?;
7294 info.cltv_expiry_delta.write(writer)?;
7296 None => 0u8.write(writer)?
7299 self.context.channel_transaction_parameters.write(writer)?;
7300 self.context.funding_transaction.write(writer)?;
7302 self.context.counterparty_cur_commitment_point.write(writer)?;
7303 self.context.counterparty_prev_commitment_point.write(writer)?;
7304 self.context.counterparty_node_id.write(writer)?;
7306 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7308 self.context.commitment_secrets.write(writer)?;
7310 self.context.channel_update_status.write(writer)?;
7312 #[cfg(any(test, fuzzing))]
7313 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7314 #[cfg(any(test, fuzzing))]
7315 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7316 htlc.write(writer)?;
7319 // If the channel type is something other than only-static-remote-key, then we need to have
7320 // older clients fail to deserialize this channel at all. If the type is
7321 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7323 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7324 Some(&self.context.channel_type) } else { None };
7326 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7327 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7328 // a different percentage of the channel value then 10%, which older versions of LDK used
7329 // to set it to before the percentage was made configurable.
7330 let serialized_holder_selected_reserve =
7331 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7332 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7334 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7335 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7336 let serialized_holder_htlc_max_in_flight =
7337 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7338 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7340 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7341 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7343 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7344 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7345 // we write the high bytes as an option here.
7346 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7348 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7350 write_tlv_fields!(writer, {
7351 (0, self.context.announcement_sigs, option),
7352 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7353 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7354 // them twice, once with their original default values above, and once as an option
7355 // here. On the read side, old versions will simply ignore the odd-type entries here,
7356 // and new versions map the default values to None and allow the TLV entries here to
7358 (1, self.context.minimum_depth, option),
7359 (2, chan_type, option),
7360 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7361 (4, serialized_holder_selected_reserve, option),
7362 (5, self.context.config, required),
7363 (6, serialized_holder_htlc_max_in_flight, option),
7364 (7, self.context.shutdown_scriptpubkey, option),
7365 (8, self.context.blocked_monitor_updates, optional_vec),
7366 (9, self.context.target_closing_feerate_sats_per_kw, option),
7367 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7368 (13, self.context.channel_creation_height, required),
7369 (15, preimages, required_vec),
7370 (17, self.context.announcement_sigs_state, required),
7371 (19, self.context.latest_inbound_scid_alias, option),
7372 (21, self.context.outbound_scid_alias, required),
7373 (23, channel_ready_event_emitted, option),
7374 (25, user_id_high_opt, option),
7375 (27, self.context.channel_keys_id, required),
7376 (28, holder_max_accepted_htlcs, option),
7377 (29, self.context.temporary_channel_id, option),
7378 (31, channel_pending_event_emitted, option),
7379 (35, pending_outbound_skimmed_fees, optional_vec),
7380 (37, holding_cell_skimmed_fees, optional_vec),
7381 (38, self.context.is_batch_funding, option),
7382 (39, pending_outbound_blinding_points, optional_vec),
7383 (41, holding_cell_blinding_points, optional_vec),
// Cap on any single up-front buffer allocation while deserializing (e.g. the legacy
// serialized-signer blob read in `Channel::read` below), so a corrupt length prefix
// cannot make us allocate gigabytes before we notice the stream is short.
7390 const MAX_ALLOC_SIZE: usize = 64*1024;
// Deserializes a `Channel` from the format produced by `Channel::write`: a fixed-order
// prefix of legacy (non-TLV) fields followed by a TLV stream of newer fields. The args
// tuple supplies the entropy source, the signer provider, the current best-block height
// (used as a default `channel_creation_height` for channels written before TLV type 13
// existed), and the feature set we support (used to reject channels negotiated with
// features we don't understand).
7391 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7393 ES::Target: EntropySource,
7394 SP::Target: SignerProvider
7396 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7397 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
// NOTE(review): `ver` is not referenced in the lines visible here; presumably legacy-format
// branches elided from this view consult it — confirm against the full source.
7398 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7400 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7401 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7402 // the low bytes now and the high bytes later.
7403 let user_id_low: u64 = Readable::read(reader)?;
// Start from a default config; TLV type 5 below may replace it wholesale, and the legacy
// 0.0.98-era fields are patched into it field-by-field here.
7405 let mut config = Some(LegacyChannelConfig::default());
7407 // Read the old serialization of the ChannelConfig from version 0.0.98.
7408 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7409 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7410 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7411 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7413 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7414 let mut _val: u64 = Readable::read(reader)?;
// Fixed-order legacy fields follow; each `Readable::read` consumes bytes in exactly the
// order `Channel::write` produced them, so statement order here is load-bearing.
7417 let channel_id = Readable::read(reader)?;
7418 let channel_state = Readable::read(reader)?;
7419 let channel_value_satoshis = Readable::read(reader)?;
7421 let latest_monitor_update_id = Readable::read(reader)?;
7423 let mut keys_data = None;
7425 // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7426 // the `channel_keys_id` TLV is present below.
7427 let keys_len: u32 = Readable::read(reader)?;
// Capacity is clamped to MAX_ALLOC_SIZE so an attacker-corrupted `keys_len` cannot force a
// huge allocation; the loop below still reads exactly `keys_len` bytes (in 1KB chunks).
7428 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7429 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7430 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7431 let mut data = [0; 1024];
7432 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7433 reader.read_exact(read_slice)?;
7434 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7438 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7439 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7440 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7443 let destination_script = Readable::read(reader)?;
7445 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7446 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7447 let value_to_self_msat = Readable::read(reader)?;
// Length-prefixed HTLC lists. Capacity is clamped to DEFAULT_MAX_HTLCS to bound the
// allocation on corrupt counts; the loop still reads the full claimed count.
7449 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7451 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7452 for _ in 0..pending_inbound_htlc_count {
7453 pending_inbound_htlcs.push(InboundHTLCOutput {
7454 htlc_id: Readable::read(reader)?,
7455 amount_msat: Readable::read(reader)?,
7456 cltv_expiry: Readable::read(reader)?,
7457 payment_hash: Readable::read(reader)?,
// Inbound HTLC state is a u8 discriminant (1-4); anything else is a decode failure.
7458 state: match <u8 as Readable>::read(reader)? {
7459 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7460 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7461 3 => InboundHTLCState::Committed,
7462 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7463 _ => return Err(DecodeError::InvalidValue),
7468 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7469 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7470 for _ in 0..pending_outbound_htlc_count {
7471 pending_outbound_htlcs.push(OutboundHTLCOutput {
7472 htlc_id: Readable::read(reader)?,
7473 amount_msat: Readable::read(reader)?,
7474 cltv_expiry: Readable::read(reader)?,
7475 payment_hash: Readable::read(reader)?,
7476 source: Readable::read(reader)?,
7477 state: match <u8 as Readable>::read(reader)? {
7478 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7479 1 => OutboundHTLCState::Committed,
7481 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7482 OutboundHTLCState::RemoteRemoved(option.into())
7485 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7486 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7489 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7490 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7492 _ => return Err(DecodeError::InvalidValue),
// Skimmed fees and blinding points are not part of the legacy encoding; they are filled in
// from their TLV-encoded parallel lists after `read_tlv_fields!` below.
7494 skimmed_fee_msat: None,
7495 blinding_point: None,
7499 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7500 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7501 for _ in 0..holding_cell_htlc_update_count {
7502 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7503 0 => HTLCUpdateAwaitingACK::AddHTLC {
7504 amount_msat: Readable::read(reader)?,
7505 cltv_expiry: Readable::read(reader)?,
7506 payment_hash: Readable::read(reader)?,
7507 source: Readable::read(reader)?,
7508 onion_routing_packet: Readable::read(reader)?,
7509 skimmed_fee_msat: None,
7510 blinding_point: None,
7512 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7513 payment_preimage: Readable::read(reader)?,
7514 htlc_id: Readable::read(reader)?,
7516 2 => HTLCUpdateAwaitingACK::FailHTLC {
7517 htlc_id: Readable::read(reader)?,
7518 err_packet: Readable::read(reader)?,
7520 _ => return Err(DecodeError::InvalidValue),
7524 let resend_order = match <u8 as Readable>::read(reader)? {
7525 0 => RAACommitmentOrder::CommitmentFirst,
7526 1 => RAACommitmentOrder::RevokeAndACKFirst,
7527 _ => return Err(DecodeError::InvalidValue),
7530 let monitor_pending_channel_ready = Readable::read(reader)?;
7531 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7532 let monitor_pending_commitment_signed = Readable::read(reader)?;
7534 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7535 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7536 for _ in 0..monitor_pending_forwards_count {
7537 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7540 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7541 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7542 for _ in 0..monitor_pending_failures_count {
7543 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
// Only the feerate is stored on disk; the `FeeUpdateState` half of the pair is
// reconstructed further down once `channel_parameters` is available.
7546 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7548 let holding_cell_update_fee = Readable::read(reader)?;
7550 let next_holder_htlc_id = Readable::read(reader)?;
7551 let next_counterparty_htlc_id = Readable::read(reader)?;
7552 let update_time_counter = Readable::read(reader)?;
7553 let feerate_per_kw = Readable::read(reader)?;
7555 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7556 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7557 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7558 // consider the stale state on reload.
7559 match <u8 as Readable>::read(reader)? {
// Consume and discard the stale legacy closing-fee fields to keep the stream aligned.
7562 let _: u32 = Readable::read(reader)?;
7563 let _: u64 = Readable::read(reader)?;
7564 let _: Signature = Readable::read(reader)?;
7566 _ => return Err(DecodeError::InvalidValue),
7569 let funding_tx_confirmed_in = Readable::read(reader)?;
7570 let funding_tx_confirmation_height = Readable::read(reader)?;
7571 let short_channel_id = Readable::read(reader)?;
7573 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7574 let holder_dust_limit_satoshis = Readable::read(reader)?;
7575 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
// Legacy position for the counterparty reserve; TLV type 3 below may overwrite it.
7576 let mut counterparty_selected_channel_reserve_satoshis = None;
7578 // Read the old serialization from version 0.0.98.
7579 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7581 // Read the 8 bytes of backwards-compatibility data.
7582 let _dummy: u64 = Readable::read(reader)?;
7584 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7585 let holder_htlc_minimum_msat = Readable::read(reader)?;
7586 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
// Same pattern for minimum_depth: legacy slot here, TLV type 1 below takes precedence.
7588 let mut minimum_depth = None;
7590 // Read the old serialization from version 0.0.98.
7591 minimum_depth = Some(Readable::read(reader)?);
7593 // Read the 4 bytes of backwards-compatibility data.
7594 let _dummy: u32 = Readable::read(reader)?;
// Presence byte (0 = none, 1 = present) followed by the three forwarding-info fields,
// mirroring the write side.
7597 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7599 1 => Some(CounterpartyForwardingInfo {
7600 fee_base_msat: Readable::read(reader)?,
7601 fee_proportional_millionths: Readable::read(reader)?,
7602 cltv_expiry_delta: Readable::read(reader)?,
7604 _ => return Err(DecodeError::InvalidValue),
// Mutable because `channel_type_features` is overwritten after the channel type is known.
7607 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7608 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7610 let counterparty_cur_commitment_point = Readable::read(reader)?;
7612 let counterparty_prev_commitment_point = Readable::read(reader)?;
7613 let counterparty_node_id = Readable::read(reader)?;
7615 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7616 let commitment_secrets = Readable::read(reader)?;
7618 let channel_update_status = Readable::read(reader)?;
// Test/fuzzing-only bookkeeping, written under the same cfg on the write side; the
// assert! enforces that the serialized set contained no duplicates.
7620 #[cfg(any(test, fuzzing))]
7621 let mut historical_inbound_htlc_fulfills = HashSet::new();
7622 #[cfg(any(test, fuzzing))]
7624 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7625 for _ in 0..htlc_fulfills_len {
7626 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
// Rebuild the fee-update state dropped at write time: if we're the funder the update is
// ours (Outbound); otherwise only the AwaitingRemoteRevokeToAnnounce state was serialized.
7630 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7631 Some((feerate, if channel_parameters.is_outbound_from_holder {
7632 FeeUpdateState::Outbound
7634 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
// Defaults for TLV-encoded fields; any entry present in the TLV stream below replaces
// the corresponding default here.
7640 let mut announcement_sigs = None;
7641 let mut target_closing_feerate_sats_per_kw = None;
7642 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7643 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7644 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7645 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7646 // only, so we default to that if none was written.
7647 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7648 let mut channel_creation_height = Some(serialized_height);
7649 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7651 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7652 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7653 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7654 let mut latest_inbound_scid_alias = None;
7655 let mut outbound_scid_alias = None;
7656 let mut channel_pending_event_emitted = None;
7657 let mut channel_ready_event_emitted = None;
7659 let mut user_id_high_opt: Option<u64> = None;
7660 let mut channel_keys_id: Option<[u8; 32]> = None;
7661 let mut temporary_channel_id: Option<ChannelId> = None;
7662 let mut holder_max_accepted_htlcs: Option<u16> = None;
7664 let mut blocked_monitor_updates = Some(Vec::new());
7666 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7667 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7669 let mut is_batch_funding: Option<()> = None;
7671 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7672 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
// TLV type numbers here must stay in lockstep with `write_tlv_fields!` in Channel::write.
7674 read_tlv_fields!(reader, {
7675 (0, announcement_sigs, option),
7676 (1, minimum_depth, option),
7677 (2, channel_type, option),
7678 (3, counterparty_selected_channel_reserve_satoshis, option),
7679 (4, holder_selected_channel_reserve_satoshis, option),
7680 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7681 (6, holder_max_htlc_value_in_flight_msat, option),
7682 (7, shutdown_scriptpubkey, option),
7683 (8, blocked_monitor_updates, optional_vec),
7684 (9, target_closing_feerate_sats_per_kw, option),
7685 (11, monitor_pending_finalized_fulfills, optional_vec),
7686 (13, channel_creation_height, option),
7687 (15, preimages_opt, optional_vec),
7688 (17, announcement_sigs_state, option),
7689 (19, latest_inbound_scid_alias, option),
7690 (21, outbound_scid_alias, option),
7691 (23, channel_ready_event_emitted, option),
7692 (25, user_id_high_opt, option),
7693 (27, channel_keys_id, option),
7694 (28, holder_max_accepted_htlcs, option),
7695 (29, temporary_channel_id, option),
7696 (31, channel_pending_event_emitted, option),
7697 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7698 (37, holding_cell_skimmed_fees_opt, optional_vec),
7699 (38, is_batch_funding, option),
7700 (39, pending_outbound_blinding_points_opt, optional_vec),
7701 (41, holding_cell_blinding_points_opt, optional_vec),
// Modern writes include `channel_keys_id` (TLV 27): re-derive the signer from it and, if
// the channel has reached the funding stage, hand it the channel parameters. Otherwise
// fall back to deserializing the raw signer bytes captured earlier.
7704 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7705 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7706 // If we've gotten to the funding stage of the channel, populate the signer with its
7707 // required channel parameters.
7708 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7709 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7710 holder_signer.provide_channel_parameters(&channel_parameters);
7712 (channel_keys_id, holder_signer)
7714 // `keys_data` can be `None` if we had corrupted data.
7715 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7716 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7717 (holder_signer.channel_keys_id(), holder_signer)
// Re-attach claimed preimages (TLV 15) to the outbound HTLCs awaiting removal, in order;
// a count mismatch in either direction is a decode failure.
7720 if let Some(preimages) = preimages_opt {
7721 let mut iter = preimages.into_iter();
7722 for htlc in pending_outbound_htlcs.iter_mut() {
7724 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7725 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7727 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7728 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7733 // We expect all preimages to be consumed above
7734 if iter.next().is_some() {
7735 return Err(DecodeError::InvalidValue);
7739 let chan_features = channel_type.as_ref().unwrap();
7740 if !chan_features.is_subset(our_supported_features) {
7741 // If the channel was written by a new version and negotiated with features we don't
7742 // understand yet, refuse to read it.
7743 return Err(DecodeError::UnknownRequiredFeature);
7746 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7747 // To account for that, we're proactively setting/overriding the field here.
7748 channel_parameters.channel_type_features = chan_features.clone();
// Re-randomize the secp context with fresh entropy (secp256k1 context blinding).
7750 let mut secp_ctx = Secp256k1::new();
7751 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7753 // `user_id` used to be a single u64 value. In order to remain backwards
7754 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7755 // separate u64 values.
7756 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
7758 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
// Skimmed fees and blinding points were serialized as parallel lists (TLVs 35/37/39/41);
// zip each back onto its HTLC list and require the lengths to match exactly.
7760 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7761 let mut iter = skimmed_fees.into_iter();
7762 for htlc in pending_outbound_htlcs.iter_mut() {
7763 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7765 // We expect all skimmed fees to be consumed above
7766 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7768 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7769 let mut iter = skimmed_fees.into_iter();
7770 for htlc in holding_cell_htlc_updates.iter_mut() {
7771 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7772 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7775 // We expect all skimmed fees to be consumed above
7776 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7778 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
7779 let mut iter = blinding_pts.into_iter();
7780 for htlc in pending_outbound_htlcs.iter_mut() {
7781 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7783 // We expect all blinding points to be consumed above
7784 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7786 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
7787 let mut iter = blinding_pts.into_iter();
7788 for htlc in holding_cell_htlc_updates.iter_mut() {
7789 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
7790 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7793 // We expect all blinding points to be consumed above
7794 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
// All legacy and TLV fields are read; assemble the in-memory ChannelContext. The
// `.unwrap()` calls are safe because each field was pre-seeded with a Some default above.
7798 context: ChannelContext {
7801 config: config.unwrap(),
7805 // Note that we don't care about serializing handshake limits as we only ever serialize
7806 // channel data after the handshake has completed.
7807 inbound_handshake_limits_override: None,
7810 temporary_channel_id,
7812 announcement_sigs_state: announcement_sigs_state.unwrap(),
7814 channel_value_satoshis,
7816 latest_monitor_update_id,
7818 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7819 shutdown_scriptpubkey,
7822 cur_holder_commitment_transaction_number,
7823 cur_counterparty_commitment_transaction_number,
7826 holder_max_accepted_htlcs,
7827 pending_inbound_htlcs,
7828 pending_outbound_htlcs,
7829 holding_cell_htlc_updates,
7833 monitor_pending_channel_ready,
7834 monitor_pending_revoke_and_ack,
7835 monitor_pending_commitment_signed,
7836 monitor_pending_forwards,
7837 monitor_pending_failures,
7838 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
// Signer-pending flags are runtime-only state and always start cleared on reload.
7840 signer_pending_commitment_update: false,
7841 signer_pending_funding: false,
7844 holding_cell_update_fee,
7845 next_holder_htlc_id,
7846 next_counterparty_htlc_id,
7847 update_time_counter,
7850 #[cfg(debug_assertions)]
7851 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7852 #[cfg(debug_assertions)]
7853 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
// Closing-fee negotiation state is intentionally not persisted (see the note where the
// legacy fields were discarded above); it restarts from scratch after reload.
7855 last_sent_closing_fee: None,
7856 pending_counterparty_closing_signed: None,
7857 expecting_peer_commitment_signed: false,
7858 closing_fee_limits: None,
7859 target_closing_feerate_sats_per_kw,
7861 funding_tx_confirmed_in,
7862 funding_tx_confirmation_height,
7864 channel_creation_height: channel_creation_height.unwrap(),
7866 counterparty_dust_limit_satoshis,
7867 holder_dust_limit_satoshis,
7868 counterparty_max_htlc_value_in_flight_msat,
7869 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7870 counterparty_selected_channel_reserve_satoshis,
7871 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7872 counterparty_htlc_minimum_msat,
7873 holder_htlc_minimum_msat,
7874 counterparty_max_accepted_htlcs,
7877 counterparty_forwarding_info,
7879 channel_transaction_parameters: channel_parameters,
7880 funding_transaction,
7883 counterparty_cur_commitment_point,
7884 counterparty_prev_commitment_point,
7885 counterparty_node_id,
7887 counterparty_shutdown_scriptpubkey,
7891 channel_update_status,
7892 closing_signed_in_flight: false,
7896 #[cfg(any(test, fuzzing))]
7897 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7898 #[cfg(any(test, fuzzing))]
7899 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7901 workaround_lnd_bug_4006: None,
7902 sent_message_awaiting_response: None,
7904 latest_inbound_scid_alias,
7905 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
7906 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
// Old serializations predate these events; assume they already fired so we don't
// re-emit ChannelPending/ChannelReady for long-established channels.
7908 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7909 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7911 #[cfg(any(test, fuzzing))]
7912 historical_inbound_htlc_fulfills,
7914 channel_type: channel_type.unwrap(),
7917 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7926 use bitcoin::blockdata::constants::ChainHash;
7927 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7928 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7929 use bitcoin::blockdata::opcodes;
7930 use bitcoin::network::constants::Network;
7931 use crate::ln::{PaymentHash, PaymentPreimage};
7932 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7933 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7934 use crate::ln::channel::InitFeatures;
7935 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
7936 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7937 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
7938 use crate::ln::msgs;
7939 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7940 use crate::ln::script::ShutdownScript;
7941 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7942 use crate::chain::BestBlock;
7943 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7944 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7945 use crate::chain::transaction::OutPoint;
7946 use crate::routing::router::{Path, RouteHop};
7947 use crate::util::config::UserConfig;
7948 use crate::util::errors::APIError;
7949 use crate::util::ser::{ReadableArgs, Writeable};
7950 use crate::util::test_utils;
7951 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7952 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7953 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7954 use bitcoin::secp256k1::{SecretKey,PublicKey};
7955 use bitcoin::hashes::sha256::Hash as Sha256;
7956 use bitcoin::hashes::Hash;
7957 use bitcoin::hashes::hex::FromHex;
7958 use bitcoin::hash_types::WPubkeyHash;
7959 use bitcoin::blockdata::locktime::absolute::LockTime;
7960 use bitcoin::address::{WitnessProgram, WitnessVersion};
7961 use crate::prelude::*;
7963 struct TestFeeEstimator {
7966 impl FeeEstimator for TestFeeEstimator {
7967 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7973 fn test_max_funding_satoshis_no_wumbo() {
7974 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7975 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7976 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7980 signer: InMemorySigner,
7983 impl EntropySource for Keys {
7984 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
// Test SignerProvider: always hands back the one stored `InMemorySigner` and
// fixed, hard-coded destination/shutdown scripts derived from constant keys.
7987 impl SignerProvider for Keys {
7988 type EcdsaSigner = InMemorySigner;
7990 type TaprootSigner = InMemorySigner;
// Channel keys id is taken from the stored signer regardless of the
// channel parameters passed in.
7992 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7993 self.signer.channel_keys_id()
// Presumably returns a clone of `self.signer` — body elided from this
// view; TODO confirm.
7996 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
// Deserialization of signers is never exercised by these tests.
8000 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
// Fixed P2WPKH destination script built from a constant secret key.
8002 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8003 let secp_ctx = Secp256k1::signing_only();
8004 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8005 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
// OP_0 <20-byte-key-hash>, i.e. a v0 witness (P2WPKH) output script.
8006 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
// Fixed P2WPKH shutdown script from the same style of constant key.
8009 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8010 let secp_ctx = Secp256k1::signing_only();
8011 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8012 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
// Test-vector helper: parse a hex-encoded secret key and return the matching
// public key. Only compiled for the `_test_vectors` feature (and only when
// signature grinding is off, so signatures match the published vectors).
8016 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8017 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8018 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
// If our signer insists on a non-v0-segwit shutdown script (here a v16
// witness program) but the peer did not negotiate `option_shutdown_anysegwit`,
// channel creation must fail with `IncompatibleShutdownScript` rather than
// silently producing an unspendable/unacceptable script.
8022 fn upfront_shutdown_script_incompatibility() {
// Peer features with anysegwit-shutdown support explicitly removed.
8023 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8024 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8025 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8028 let seed = [42; 32];
8029 let network = Network::Testnet;
8030 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
// Force the keys interface to hand back the incompatible script.
8031 keys_provider.expect(OnGetShutdownScriptpubkey {
8032 returns: non_v0_segwit_shutdown_script.clone(),
8035 let secp_ctx = Secp256k1::new();
8036 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8037 let config = UserConfig::default();
8038 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
// The error must carry back the exact script that was rejected.
8039 Err(APIError::IncompatibleShutdownScript { script }) => {
8040 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8042 Err(e) => panic!("Unexpected error: {:?}", e),
8043 Ok(_) => panic!("Expected error"),
8047 // Check that, during channel creation, we use the same feerate in the open channel message
8048 // as we do in the Channel object creation itself.
8050 fn test_open_channel_msg_fee() {
8051 let original_fee = 253;
8052 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8053 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8054 let secp_ctx = Secp256k1::new();
8055 let seed = [42; 32];
8056 let network = Network::Testnet;
8057 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8059 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8060 let config = UserConfig::default();
// Channel is created while the estimator still reports `original_fee`.
8061 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8063 // Now change the fee so we can check that the fee in the open_channel message is the
8064 // same as the old fee.
8065 fee_est.fee_est = 500;
8066 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
// The message must reflect the feerate captured at channel creation, not
// the estimator's current (changed) value.
8067 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
// Walks the full open-channel handshake between two in-memory channels, sets
// asymmetric dust limits (A: 1560 sat, B: 546 sat), and injects HTLCs whose
// amount falls between the two limits. The same HTLC must then count as dust
// on A's local commitment (no fee contribution) while counting as non-dust on
// the remote (B's) commitment.
8071 fn test_holder_vs_counterparty_dust_limit() {
8072 // Test that when calculating the local and remote commitment transaction fees, the correct
8073 // dust limits are used.
8074 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8075 let secp_ctx = Secp256k1::new();
8076 let seed = [42; 32];
8077 let network = Network::Testnet;
8078 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8079 let logger = test_utils::TestLogger::new();
8080 let best_block = BestBlock::from_network(network);
8082 // Go through the flow of opening a channel between two nodes, making sure
8083 // they have different dust limits.
8085 // Create Node A's channel pointing to Node B's pubkey
8086 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8087 let config = UserConfig::default();
8088 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8090 // Create Node B's channel by receiving Node A's open_channel message
8091 // Make sure A's dust limit is as we expect.
8092 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8093 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8094 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8096 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8097 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8098 accept_channel_msg.dust_limit_satoshis = 546;
8099 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
// A's own dust limit is forced higher than B's to create the asymmetry
// the test relies on.
8100 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8102 // Node A --> Node B: funding created
8103 let output_script = node_a_chan.context.get_funding_redeemscript();
8104 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8105 value: 10000000, script_pubkey: output_script.clone(),
8107 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8108 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8109 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8111 // Node B --> Node A: funding signed
8112 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8114 // Put some inbound and outbound HTLCs in A's channel.
8115 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8116 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8118 amount_msat: htlc_amount_msat,
8119 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8120 cltv_expiry: 300000000,
8121 state: InboundHTLCState::Committed,
8124 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8126 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8127 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8128 cltv_expiry: 200000000,
8129 state: OutboundHTLCState::Committed,
8130 source: HTLCSource::OutboundRoute {
8131 path: Path { hops: Vec::new(), blinded_tail: None },
8132 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8133 first_hop_htlc_msat: 548,
8134 payment_id: PaymentId([42; 32]),
8136 skimmed_fee_msat: None,
8137 blinding_point: None,
8140 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8141 // the dust limit check.
8142 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8143 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
// Dust HTLCs add no outputs, so the fee equals the 0-HTLC commitment fee.
8144 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8145 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8147 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8148 // of the HTLCs are seen to be above the dust limit.
8149 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8150 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8151 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8152 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8153 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
// Verifies the per-direction HTLC dust thresholds: an offered HTLC's dust
// cutoff uses the timeout-tx weight, a received HTLC's uses the success-tx
// weight. Each probe amount sits exactly 1 sat above/below the relevant
// threshold (at feerate 253), so a swapped weight constant flips the result.
8157 fn test_timeout_vs_success_htlc_dust_limit() {
8158 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8159 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8160 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8161 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8162 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8163 let secp_ctx = Secp256k1::new();
8164 let seed = [42; 32];
8165 let network = Network::Testnet;
8166 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8168 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8169 let config = UserConfig::default();
8170 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8172 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8173 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8175 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8176 // counted as dust when it shouldn't be.
// 1 sat above the holder's timeout-based dust threshold: non-dust, so the
// commitment fee must include one HTLC output.
8177 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8178 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8179 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8180 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8182 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8183 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8184 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8185 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8186 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
// Repeat both probes from the counterparty's perspective, using their
// dust limit and the remote-commitment fee calculation.
8188 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8190 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8191 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8192 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8193 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8194 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8196 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8197 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8198 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8199 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8200 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
// Opens a channel A<->B through funding_signed, then simulates a disconnect
// on both sides and checks the resulting `channel_reestablish` messages: with
// no commitment updates exchanged, both peers expect commitment number 1,
// revocation number 0, and an all-zero last-per-commitment-secret.
8204 fn channel_reestablish_no_updates() {
8205 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8206 let logger = test_utils::TestLogger::new();
8207 let secp_ctx = Secp256k1::new();
8208 let seed = [42; 32];
8209 let network = Network::Testnet;
8210 let best_block = BestBlock::from_network(network);
8211 let chain_hash = ChainHash::using_genesis_block(network);
8212 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8214 // Go through the flow of opening a channel between two nodes.
8216 // Create Node A's channel pointing to Node B's pubkey
8217 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8218 let config = UserConfig::default();
8219 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8221 // Create Node B's channel by receiving Node A's open_channel message
8222 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8223 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8224 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8226 // Node B --> Node A: accept channel
8227 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8228 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8230 // Node A --> Node B: funding created
8231 let output_script = node_a_chan.context.get_funding_redeemscript();
8232 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8233 value: 10000000, script_pubkey: output_script.clone(),
8235 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8236 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8237 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8239 // Node B --> Node A: funding signed
8240 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8242 // Now disconnect the two nodes and check that the commitment point in
8243 // Node B's channel_reestablish message is sane.
8244 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8245 let msg = node_b_chan.get_channel_reestablish(&&logger);
8246 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8247 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8248 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8250 // Check that the commitment point in Node A's channel_reestablish message
8252 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8253 let msg = node_a_chan.get_channel_reestablish(&&logger);
8254 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8255 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8256 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Exercises `max_inbound_htlc_value_in_flight_percent_of_channel` for both
// outbound and inbound channel construction: valid values (2%, 99%) are
// applied as-is, while out-of-range values are clamped — below 1 to 1%,
// above 100 to 100% of the channel value.
8260 fn test_configured_holder_max_htlc_value_in_flight() {
8261 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8262 let logger = test_utils::TestLogger::new();
8263 let secp_ctx = Secp256k1::new();
8264 let seed = [42; 32];
8265 let network = Network::Testnet;
8266 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8267 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8268 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
// Four configs covering: in-range low, in-range high, below-range, above-range.
8270 let mut config_2_percent = UserConfig::default();
8271 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8272 let mut config_99_percent = UserConfig::default();
8273 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8274 let mut config_0_percent = UserConfig::default();
8275 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8276 let mut config_101_percent = UserConfig::default();
8277 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8279 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8280 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8281 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8282 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8283 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8284 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8286 // Test with the upper bound - 1 of valid values (99%).
8287 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8288 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8289 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
// Reuse chan_1's open_channel message for all the inbound-side cases below.
8291 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8293 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8294 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8295 // which is set to the lower bound - 1 (2%) of the `channel_value`.
8296 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8297 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8298 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8300 // Test with the upper bound - 1 of valid values (99%).
8301 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8302 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8303 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8305 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8306 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8307 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8308 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8309 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8311 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8312 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8314 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8315 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8316 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8318 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8319 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8320 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8321 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8322 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8324 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8325 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8327 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8328 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8329 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
// Drives `test_self_and_counterparty_channel_reserve` (below) through a range
// of reserve percentages: reasonable values, unreasonably high but still
// valid combinations, a value small enough to hit the minimum-reserve floor,
// and invalid combinations whose sum reaches/exceeds 100% of channel value.
8333 fn test_configured_holder_selected_channel_reserve_satoshis() {
8335 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8336 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8337 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8339 // Test with valid but unreasonably high channel reserves
8340 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8341 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8342 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8344 // Test with calculated channel reserve less than lower bound
8345 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8346 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8348 // Test with invalid channel reserves since sum of both is greater than or equal
8350 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8351 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
// Helper for the reserve tests above. Builds an outbound channel with the
// given outbound-side reserve percentage, checks the holder-selected reserve
// (floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS), then feeds the open_channel
// message to an inbound channel with its own reserve percentage. If the two
// percentages sum to < 100% the inbound side must accept and mirror both
// reserves; otherwise inbound channel creation must fail.
8354 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8355 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8356 let logger = test_utils::TestLogger::new();
8357 let secp_ctx = Secp256k1::new();
8358 let seed = [42; 32];
8359 let network = Network::Testnet;
8360 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8361 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8362 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8365 let mut outbound_node_config = UserConfig::default();
// Config stores parts-per-million, hence the * 1_000_000 conversion.
8366 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8367 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8369 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8370 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8372 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8373 let mut inbound_node_config = UserConfig::default();
8374 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8376 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8377 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8379 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8381 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8382 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8384 // Channel Negotiations failed
8385 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8386 assert!(result.is_err());
// Opens a channel A<->B through funding_signed, then delivers a
// `channel_update` from the counterparty and checks that (a) the first
// delivery returns true (state changed) and updates the stored counterparty
// forwarding info, (b) our own htlc_minimum_msat is untouched, and (c) a
// duplicate delivery of the same update returns false.
8391 fn channel_update() {
8392 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8393 let logger = test_utils::TestLogger::new();
8394 let secp_ctx = Secp256k1::new();
8395 let seed = [42; 32];
8396 let network = Network::Testnet;
8397 let best_block = BestBlock::from_network(network);
8398 let chain_hash = ChainHash::using_genesis_block(network);
8399 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8401 // Create Node A's channel pointing to Node B's pubkey
8402 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8403 let config = UserConfig::default();
8404 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8406 // Create Node B's channel by receiving Node A's open_channel message
8407 // Make sure A's dust limit is as we expect.
8408 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8409 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8410 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8412 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8413 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8414 accept_channel_msg.dust_limit_satoshis = 546;
8415 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8416 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8418 // Node A --> Node B: funding created
8419 let output_script = node_a_chan.context.get_funding_redeemscript();
8420 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8421 value: 10000000, script_pubkey: output_script.clone(),
8423 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8424 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8425 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8427 // Node B --> Node A: funding signed
8428 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8430 // Make sure that receiving a channel update will update the Channel as expected.
8431 let update = ChannelUpdate {
8432 contents: UnsignedChannelUpdate {
8434 short_channel_id: 0,
8437 cltv_expiry_delta: 100,
8438 htlc_minimum_msat: 5,
8439 htlc_maximum_msat: MAX_VALUE_MSAT,
8441 fee_proportional_millionths: 11,
8442 excess_data: Vec::new(),
// Signature is never validated here, so an uninitialized FFI signature
// placeholder is sufficient.
8444 signature: Signature::from(unsafe { FFISignature::new() })
// First delivery: returns true to signal the update changed our state.
8446 assert!(node_a_chan.channel_update(&update).unwrap());
8448 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8449 // change our official htlc_minimum_msat.
8450 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8451 match node_a_chan.context.counterparty_forwarding_info() {
8453 assert_eq!(info.cltv_expiry_delta, 100);
8454 assert_eq!(info.fee_base_msat, 110);
8455 assert_eq!(info.fee_proportional_millionths, 11);
8457 None => panic!("expected counterparty forwarding info to be Some")
// Re-delivering the identical update must report no state change.
8460 assert!(!node_a_chan.channel_update(&update).unwrap());
// Round-trip serialization test for the optional per-HTLC `blinding_point`
// and `skimmed_fee_msat` fields: populates pending outbound HTLCs and
// holding-cell updates with a mix of set/unset values, encodes the Channel,
// decodes it, and asserts both collections survive unchanged.
8464 fn blinding_point_skimmed_fee_ser() {
8465 // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
8466 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8467 let secp_ctx = Secp256k1::new();
8468 let seed = [42; 32];
8469 let network = Network::Testnet;
8470 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8472 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8473 let config = UserConfig::default();
8474 let features = channelmanager::provided_init_features(&config);
8475 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Wrap the context in a full Channel so we can use Channel's ser/deser.
8476 let mut chan = Channel { context: outbound_chan.context };
8478 let dummy_htlc_source = HTLCSource::OutboundRoute {
8480 hops: vec![RouteHop {
8481 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8482 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8483 cltv_expiry_delta: 0, maybe_announced_channel: false,
8487 session_priv: test_utils::privkey(42),
8488 first_hop_htlc_msat: 0,
8489 payment_id: PaymentId([42; 32]),
8491 let dummy_outbound_output = OutboundHTLCOutput {
8494 payment_hash: PaymentHash([43; 32]),
8496 state: OutboundHTLCState::Committed,
8497 source: dummy_htlc_source.clone(),
8498 skimmed_fee_msat: None,
8499 blinding_point: None,
// Ten copies, with blinding points / skimmed fees set on a subset so both
// Some and None variants get serialized (selection logic partly elided
// from this view).
8501 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8502 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8504 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8507 htlc.skimmed_fee_msat = Some(1);
8510 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8512 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8515 payment_hash: PaymentHash([43; 32]),
8516 source: dummy_htlc_source.clone(),
8517 onion_routing_packet: msgs::OnionPacket {
8519 public_key: Ok(test_utils::pubkey(1)),
8520 hop_data: [0; 20*65],
8523 skimmed_fee_msat: None,
8524 blinding_point: None,
8526 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8527 payment_preimage: PaymentPreimage([42; 32]),
// Cycle Add / Claim / Add-with-blinding-and-skim through the holding cell.
8530 let mut holding_cell_htlc_updates = Vec::with_capacity(10);
8533 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8534 } else if i % 3 == 1 {
8535 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8537 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8538 if let HTLCUpdateAwaitingACK::AddHTLC {
8539 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8540 } = &mut dummy_add {
8541 *blinding_point = Some(test_utils::pubkey(42 + i));
8542 *skimmed_fee_msat = Some(42);
8544 holding_cell_htlc_updates.push(dummy_add);
8547 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8549 // Encode and decode the channel and ensure that the HTLCs within are the same.
8550 let encoded_chan = chan.encode();
8551 let mut s = crate::io::Cursor::new(&encoded_chan);
8552 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8553 let features = channelmanager::provided_channel_type_features(&config);
8554 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8555 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8556 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8559 #[cfg(feature = "_test_vectors")]
8561 fn outbound_commitment_test() {
8562 use bitcoin::sighash;
8563 use bitcoin::consensus::encode::serialize;
8564 use bitcoin::sighash::EcdsaSighashType;
8565 use bitcoin::hashes::hex::FromHex;
8566 use bitcoin::hash_types::Txid;
8567 use bitcoin::secp256k1::Message;
8568 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8569 use crate::ln::PaymentPreimage;
8570 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8571 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8572 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8573 use crate::util::logger::Logger;
8574 use crate::sync::Arc;
8575 use core::str::FromStr;
8576 use hex::DisplayHex;
8578 // Test vectors from BOLT 3 Appendices C and F (anchors):
8579 let feeest = TestFeeEstimator{fee_est: 15000};
8580 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8581 let secp_ctx = Secp256k1::new();
8583 let mut signer = InMemorySigner::new(
8585 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8586 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8587 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8588 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8589 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8591 // These aren't set in the test vectors:
8592 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8598 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8599 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8600 let keys_provider = Keys { signer: signer.clone() };
8602 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8603 let mut config = UserConfig::default();
8604 config.channel_handshake_config.announced_channel = false;
8605 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8606 chan.context.holder_dust_limit_satoshis = 546;
8607 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8609 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8611 let counterparty_pubkeys = ChannelPublicKeys {
8612 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8613 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8614 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8615 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
8616 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
8618 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8619 CounterpartyChannelTransactionParameters {
8620 pubkeys: counterparty_pubkeys.clone(),
8621 selected_contest_delay: 144
8623 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8624 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8626 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8627 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8629 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8630 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8632 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8633 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8635 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8636 // derived from a commitment_seed, so instead we copy it here and call
8637 // build_commitment_transaction.
8638 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8639 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8640 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8641 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8642 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8644 macro_rules! test_commitment {
8645 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8646 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8647 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8651 macro_rules! test_commitment_with_anchors {
8652 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8653 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8654 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8658 macro_rules! test_commitment_common {
8659 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8660 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8662 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8663 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8665 let htlcs = commitment_stats.htlcs_included.drain(..)
8666 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8668 (commitment_stats.tx, htlcs)
8670 let trusted_tx = commitment_tx.trust();
8671 let unsigned_tx = trusted_tx.built_transaction();
8672 let redeemscript = chan.context.get_funding_redeemscript();
8673 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8674 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8675 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8676 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8678 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8679 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8680 let mut counterparty_htlc_sigs = Vec::new();
8681 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8683 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8684 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8685 counterparty_htlc_sigs.push(remote_signature);
8687 assert_eq!(htlcs.len(), per_htlc.len());
8689 let holder_commitment_tx = HolderCommitmentTransaction::new(
8690 commitment_tx.clone(),
8691 counterparty_signature,
8692 counterparty_htlc_sigs,
8693 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8694 chan.context.counterparty_funding_pubkey()
8696 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8697 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8699 let funding_redeemscript = chan.context.get_funding_redeemscript();
8700 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8701 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8703 // ((htlc, counterparty_sig), (index, holder_sig))
8704 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8707 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8708 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8710 let ref htlc = htlcs[$htlc_idx];
8711 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8712 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8713 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8714 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8715 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8716 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8717 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
8719 let mut preimage: Option<PaymentPreimage> = None;
8722 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
8723 if out == htlc.payment_hash {
8724 preimage = Some(PaymentPreimage([i; 32]));
8728 assert!(preimage.is_some());
8731 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8732 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8733 channel_derivation_parameters: ChannelDerivationParameters {
8734 value_satoshis: chan.context.channel_value_satoshis,
8735 keys_id: chan.context.channel_keys_id,
8736 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8738 commitment_txid: trusted_tx.txid(),
8739 per_commitment_number: trusted_tx.commitment_number(),
8740 per_commitment_point: trusted_tx.per_commitment_point(),
8741 feerate_per_kw: trusted_tx.feerate_per_kw(),
8743 preimage: preimage.clone(),
8744 counterparty_sig: *htlc_counterparty_sig,
8745 }, &secp_ctx).unwrap();
8746 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
8747 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
8749 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
8750 assert_eq!(signature, htlc_holder_sig, "htlc sig");
8751 let trusted_tx = holder_commitment_tx.trust();
8752 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
8753 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
8754 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
8756 assert!(htlc_counterparty_sig_iter.next().is_none());
8760 // anchors: simple commitment tx with no HTLCs and single anchor
8761 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
8762 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
8763 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8765 // simple commitment tx with no HTLCs
8766 chan.context.value_to_self_msat = 7000000000;
8768 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
8769 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
8770 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8772 // anchors: simple commitment tx with no HTLCs
8773 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
8774 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
8775 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
8777 chan.context.pending_inbound_htlcs.push({
8778 let mut out = InboundHTLCOutput{
8780 amount_msat: 1000000,
8782 payment_hash: PaymentHash([0; 32]),
8783 state: InboundHTLCState::Committed,
8785 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
8788 chan.context.pending_inbound_htlcs.push({
8789 let mut out = InboundHTLCOutput{
8791 amount_msat: 2000000,
8793 payment_hash: PaymentHash([0; 32]),
8794 state: InboundHTLCState::Committed,
8796 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
8799 chan.context.pending_outbound_htlcs.push({
8800 let mut out = OutboundHTLCOutput{
8802 amount_msat: 2000000,
8804 payment_hash: PaymentHash([0; 32]),
8805 state: OutboundHTLCState::Committed,
8806 source: HTLCSource::dummy(),
8807 skimmed_fee_msat: None,
8808 blinding_point: None,
8810 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
8813 chan.context.pending_outbound_htlcs.push({
8814 let mut out = OutboundHTLCOutput{
8816 amount_msat: 3000000,
8818 payment_hash: PaymentHash([0; 32]),
8819 state: OutboundHTLCState::Committed,
8820 source: HTLCSource::dummy(),
8821 skimmed_fee_msat: None,
8822 blinding_point: None,
8824 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
8827 chan.context.pending_inbound_htlcs.push({
8828 let mut out = InboundHTLCOutput{
8830 amount_msat: 4000000,
8832 payment_hash: PaymentHash([0; 32]),
8833 state: InboundHTLCState::Committed,
8835 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
8839 // commitment tx with all five HTLCs untrimmed (minimum feerate)
8840 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8841 chan.context.feerate_per_kw = 0;
8843 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
8844 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
8845 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8848 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
8849 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
8850 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8853 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
8854 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
8855 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8858 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
8859 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
8860 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8863 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
8864 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
8865 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8868 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
8869 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
8870 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8873 // commitment tx with seven outputs untrimmed (maximum feerate)
8874 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8875 chan.context.feerate_per_kw = 647;
8877 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
8878 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
8879 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8882 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
8883 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
8884 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
8887 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
8888 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
8889 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8892 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
8893 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
8894 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8897 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
8898 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
8899 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8902 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
8903 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
8904 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8907 // commitment tx with six outputs untrimmed (minimum feerate)
8908 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8909 chan.context.feerate_per_kw = 648;
8911 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
8912 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
8913 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8916 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
8917 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
8918 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8921 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
8922 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
8923 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8926 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
8927 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
8928 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8931 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
8932 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
8933 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8936 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
8937 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8938 chan.context.feerate_per_kw = 645;
8939 chan.context.holder_dust_limit_satoshis = 1001;
8941 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
8942 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
8943 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8946 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
8947 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
8948 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
8951 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
8952 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
8953 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
8956 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
8957 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
8958 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
8961 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
8962 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
8963 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
8966 // commitment tx with six outputs untrimmed (maximum feerate)
8967 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8968 chan.context.feerate_per_kw = 2069;
8969 chan.context.holder_dust_limit_satoshis = 546;
8971 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
8972 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
8973 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
8976 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
8977 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
8978 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
8981 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
8982 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
8983 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
8986 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
8987 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
8988 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
8991 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
8992 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
8993 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
8996 // commitment tx with five outputs untrimmed (minimum feerate)
8997 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
8998 chan.context.feerate_per_kw = 2070;
9000 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9001 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9002 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9005 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9006 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9007 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9010 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9011 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9012 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9015 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9016 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9017 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9020 // commitment tx with five outputs untrimmed (maximum feerate)
9021 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9022 chan.context.feerate_per_kw = 2194;
9024 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9025 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9026 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9029 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9030 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9031 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9034 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9035 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9036 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9039 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9040 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9041 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9044 // commitment tx with four outputs untrimmed (minimum feerate)
9045 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9046 chan.context.feerate_per_kw = 2195;
9048 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9049 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9050 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9053 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9054 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9055 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9058 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9059 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9060 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9063 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9064 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9065 chan.context.feerate_per_kw = 2185;
9066 chan.context.holder_dust_limit_satoshis = 2001;
9067 let cached_channel_type = chan.context.channel_type;
9068 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9070 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9071 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9072 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9075 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9076 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9077 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9080 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9081 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9082 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9085 // commitment tx with four outputs untrimmed (maximum feerate)
9086 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9087 chan.context.feerate_per_kw = 3702;
9088 chan.context.holder_dust_limit_satoshis = 546;
9089 chan.context.channel_type = cached_channel_type.clone();
9091 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9092 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9093 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9096 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9097 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9098 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9101 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9102 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9103 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9106 // commitment tx with three outputs untrimmed (minimum feerate)
9107 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9108 chan.context.feerate_per_kw = 3703;
9110 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9111 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9112 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9115 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9116 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9117 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9120 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9121 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9122 chan.context.feerate_per_kw = 3687;
9123 chan.context.holder_dust_limit_satoshis = 3001;
9124 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9126 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9127 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9128 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9131 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9132 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9133 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9136 // commitment tx with three outputs untrimmed (maximum feerate)
9137 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9138 chan.context.feerate_per_kw = 4914;
9139 chan.context.holder_dust_limit_satoshis = 546;
9140 chan.context.channel_type = cached_channel_type.clone();
9142 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9143 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9144 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9147 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9148 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9149 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9152 // commitment tx with two outputs untrimmed (minimum feerate)
9153 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9154 chan.context.feerate_per_kw = 4915;
9155 chan.context.holder_dust_limit_satoshis = 546;
9157 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9158 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9159 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9161 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9162 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9163 chan.context.feerate_per_kw = 4894;
9164 chan.context.holder_dust_limit_satoshis = 4001;
9165 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9167 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9168 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9169 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9171 // commitment tx with two outputs untrimmed (maximum feerate)
9172 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9173 chan.context.feerate_per_kw = 9651180;
9174 chan.context.holder_dust_limit_satoshis = 546;
9175 chan.context.channel_type = cached_channel_type.clone();
9177 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9178 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9179 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9181 // commitment tx with one output untrimmed (minimum feerate)
9182 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9183 chan.context.feerate_per_kw = 9651181;
9185 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9186 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9187 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9189 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9190 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9191 chan.context.feerate_per_kw = 6216010;
9192 chan.context.holder_dust_limit_satoshis = 4001;
9193 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9195 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9196 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9197 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9199 // commitment tx with fee greater than funder amount
9200 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9201 chan.context.feerate_per_kw = 9651936;
9202 chan.context.holder_dust_limit_satoshis = 546;
9203 chan.context.channel_type = cached_channel_type;
9205 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9206 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9207 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9209 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9210 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9211 chan.context.feerate_per_kw = 253;
9212 chan.context.pending_inbound_htlcs.clear();
9213 chan.context.pending_inbound_htlcs.push({
9214 let mut out = InboundHTLCOutput{
9216 amount_msat: 2000000,
9218 payment_hash: PaymentHash([0; 32]),
9219 state: InboundHTLCState::Committed,
9221 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9224 chan.context.pending_outbound_htlcs.clear();
9225 chan.context.pending_outbound_htlcs.push({
9226 let mut out = OutboundHTLCOutput{
9228 amount_msat: 5000001,
9230 payment_hash: PaymentHash([0; 32]),
9231 state: OutboundHTLCState::Committed,
9232 source: HTLCSource::dummy(),
9233 skimmed_fee_msat: None,
9234 blinding_point: None,
9236 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9239 chan.context.pending_outbound_htlcs.push({
9240 let mut out = OutboundHTLCOutput{
9242 amount_msat: 5000000,
9244 payment_hash: PaymentHash([0; 32]),
9245 state: OutboundHTLCState::Committed,
9246 source: HTLCSource::dummy(),
9247 skimmed_fee_msat: None,
9248 blinding_point: None,
9250 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9254 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9255 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9256 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9259 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9260 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9261 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9263 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9264 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9265 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9267 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9268 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9269 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9272 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9273 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9274 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9275 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9278 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9279 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9280 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9282 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9283 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9284 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9286 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9287 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9288 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9293 fn test_per_commitment_secret_gen() {
9294 // Test vectors from BOLT 3 Appendix D:
// Each case feeds a fixed 32-byte seed and a 48-bit commitment index into
// chan_utils::build_commitment_secret and checks the derived secret byte-for-byte
// against the value given in the spec.
9296 let mut seed = [0; 32];
// All-zero seed at the maximum index (281474976710655 == 2^48 - 1).
9297 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9298 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9299 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
// All-ones seed at the same maximum index.
9301 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9302 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9303 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
// Same all-ones seed with an alternating-bit index (0x0aaaaaaaaaaa).
9305 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9306 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
// Same seed with the complementary alternating-bit index (0x555555555555).
9308 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9309 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
// Repeating-0x01 seed at index 1.
9311 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9312 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9313 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9317 fn test_key_derivation() {
9318 // Test vectors from BOLT 3 Appendix E:
9319 let secp_ctx = Secp256k1::new();
// Fixed base secret and per-commitment secret taken from the spec.
9321 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9322 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
// Sanity-check the public point corresponding to each secret against the spec.
9324 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9325 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9327 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9328 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
// Private-key derivation: base_secret tweaked with the per-commitment point.
9330 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9331 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
// Revocation public key from the revocation basepoint and per-commitment point.
9333 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9334 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
// Matching revocation private key, which requires knowing both secrets.
9336 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9337 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9341 fn test_zero_conf_channel_type_support() {
// Tests that an InboundV1Channel can be created from an open_channel message
// whose channel_type sets zero_conf_required on top of static_remote_key.
9342 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9343 let secp_ctx = Secp256k1::new();
9344 let seed = [42; 32];
9345 let network = Network::Testnet;
9346 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9347 let logger = test_utils::TestLogger::new();
// Open an outbound channel from node A towards node B.
9349 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9350 let config = UserConfig::default();
9351 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9352 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
// Build the channel type the test exercises: static_remote_key + zero_conf.
9354 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9355 channel_type_features.set_zero_conf_required();
// Inject that channel_type into A's open_channel message and hand it to the
// inbound side, which should accept it (res.is_ok() below).
9357 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9358 open_channel_msg.channel_type = Some(channel_type_features);
9359 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9360 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9361 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9362 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9363 assert!(res.is_ok());
9367 fn test_supports_anchors_zero_htlc_tx_fee() {
9368 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9369 // resulting `channel_type`.
9370 let secp_ctx = Secp256k1::new();
9371 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9372 let network = Network::Testnet;
9373 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9374 let logger = test_utils::TestLogger::new();
9376 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9377 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
// Local config opts in to negotiating anchors-zero-fee-HTLC-tx.
9379 let mut config = UserConfig::default();
9380 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9382 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9383 // need to signal it.
// Note the counterparty features here come from the *default* UserConfig, so
// the peer is not signaling anchors and the resulting type must not include it.
9384 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9385 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9386 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9387 &config, 0, 42, None
9389 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
// When both sides signal it, the negotiated type should be exactly
// static_remote_key + anchors_zero_fee_htlc_tx.
9391 let mut expected_channel_type = ChannelTypeFeatures::empty();
9392 expected_channel_type.set_static_remote_key_required();
9393 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
// This time the counterparty features are derived from `config`, which has
// anchors negotiation enabled above.
9395 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9396 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9397 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
// Run the open_channel through the inbound side and check both ends agree on
// the expected channel type.
9401 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9402 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9403 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9404 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9405 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9408 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9409 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9413 fn test_rejects_implicit_simple_anchors() {
9414 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9415 // each side's `InitFeatures`, it is rejected.
9416 let secp_ctx = Secp256k1::new();
9417 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9418 let network = Network::Testnet;
9419 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9420 let logger = test_utils::TestLogger::new();
9422 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9423 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9425 let config = UserConfig::default();
9427 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Hand-build an InitFeatures advertising static_remote_key (bit 12) plus the
// legacy "simple" option_anchors (bit 20), which LDK does not support.
9428 let static_remote_key_required: u64 = 1 << 12;
9429 let simple_anchors_required: u64 = 1 << 20;
9430 let raw_init_features = static_remote_key_required | simple_anchors_required;
9431 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9433 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9434 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9435 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9439 // Set `channel_type` to `None` to force the implicit feature negotiation.
9440 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9441 open_channel_msg.channel_type = None;
9443 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9444 // `static_remote_key`, it will fail the channel.
9445 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9446 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9447 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9448 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9450 assert!(channel_b.is_err());
9454 fn test_rejects_simple_anchors_channel_type() {
9455 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
// it is rejected by both the inbound (open_channel) and the outbound
// (accept_channel) handshake paths, as LDK only supports the
// anchors_zero_fee_htlc_tx variant.
9457 let secp_ctx = Secp256k1::new();
9458 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9459 let network = Network::Testnet;
9460 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9461 let logger = test_utils::TestLogger::new();
9463 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9464 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9466 let config = UserConfig::default();
9468 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
// Build both an InitFeatures and a ChannelTypeFeatures carrying
// static_remote_key (bit 12) plus the legacy option_anchors (bit 20), and
// sanity-check LDK recognizes (rather than treats as unknown) these bits.
9469 let static_remote_key_required: u64 = 1 << 12;
9470 let simple_anchors_required: u64 = 1 << 20;
9471 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9472 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9473 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9474 assert!(!simple_anchors_init.requires_unknown_bits());
9475 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9477 // First, we'll try to open a channel between A and B where A requests a channel type for
9478 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9479 // B as it's not supported by LDK.
9480 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9481 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9482 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
// Overwrite the outbound message's channel_type with the legacy anchors type.
9486 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9487 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9489 let res = InboundV1Channel::<&TestKeysInterface>::new(
9490 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9491 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9492 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9494 assert!(res.is_err());
9496 // Then, we'll try to open another channel where A requests a channel type for
9497 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9498 // original `option_anchors` feature, which should be rejected by A as it's not supported by
// LDK.
9500 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9501 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9502 10000000, 100000, 42, &config, 0, 42, None
9505 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9507 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9508 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9509 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9510 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
// Simulate the malicious downgrade in B's accept_channel; A must reject it.
9513 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9514 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9516 let res = channel_a.accept_channel(
9517 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9519 assert!(res.is_err());
9523 fn test_waiting_for_batch() {
// Walks a batch-funded channel through the funding handshake and checks that
// channel_ready / funding broadcast are held back (WaitingForBatch) until
// set_batch_ready() is called, even with trust_own_funding_0conf enabled.
9524 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9525 let logger = test_utils::TestLogger::new();
9526 let secp_ctx = Secp256k1::new();
9527 let seed = [42; 32];
9528 let network = Network::Testnet;
9529 let best_block = BestBlock::from_network(network);
9530 let chain_hash = ChainHash::using_genesis_block(network);
9531 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9533 let mut config = UserConfig::default();
9534 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9535 // channel in a batch before all channels are ready.
9536 config.channel_handshake_limits.trust_own_funding_0conf = true;
9538 // Create a channel from node a to node b that will be part of batch funding.
9539 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9540 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9545 &channelmanager::provided_init_features(&config),
// Hand A's open_channel to the inbound side (node B).
9555 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9556 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9557 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9562 &channelmanager::provided_channel_type_features(&config),
9563 &channelmanager::provided_init_features(&config),
9569 true, // Allow node b to send a 0conf channel_ready.
// Complete accept_channel back on node A.
9572 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9573 node_a_chan.accept_channel(
9574 &accept_channel_msg,
9575 &config.channel_handshake_limits,
9576 &channelmanager::provided_init_features(&config),
9579 // Fund the channel with a batch funding transaction.
// The transaction has two outputs: index 0 is this channel's funding output,
// the second stands in for another channel in the same batch.
9580 let output_script = node_a_chan.context.get_funding_redeemscript();
9581 let tx = Transaction {
9583 lock_time: LockTime::ZERO,
9587 value: 10000000, script_pubkey: output_script.clone(),
9590 value: 10000000, script_pubkey: Builder::new().into_script(),
9593 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
// Exchange funding_created / funding_signed between the two sides.
9594 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9599 ).map_err(|_| ()).unwrap();
9600 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9601 &funding_created_msg.unwrap(),
9605 ).map_err(|_| ()).unwrap();
9606 let node_b_updates = node_b_chan.monitor_updating_restored(
9614 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9615 // broadcasting the funding transaction until the batch is ready.
9616 let _ = node_a_chan.funding_signed(
9617 &funding_signed_msg.unwrap(),
9622 let node_a_updates = node_a_chan.monitor_updating_restored(
9629 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9630 // as the funding transaction depends on all channels in the batch becoming ready.
9631 assert!(node_a_updates.channel_ready.is_none());
9632 assert!(node_a_updates.funding_broadcastable.is_none());
// Channel state should reflect that we are waiting on the rest of the batch.
9634 node_a_chan.context.channel_state,
9635 ChannelState::FundingSent as u32 |
9636 ChannelState::WaitingForBatch as u32,
9639 // It is possible to receive a 0conf channel_ready from the remote node.
9640 node_a_chan.channel_ready(
9641 &node_b_updates.channel_ready.unwrap(),
// The remote's channel_ready is recorded, but WaitingForBatch still holds.
9649 node_a_chan.context.channel_state,
9650 ChannelState::FundingSent as u32 |
9651 ChannelState::WaitingForBatch as u32 |
9652 ChannelState::TheirChannelReady as u32,
9655 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9656 node_a_chan.set_batch_ready();
// Once the batch is ready, only FundingSent | TheirChannelReady remain and our
// own channel_ready can finally be generated.
9658 node_a_chan.context.channel_state,
9659 ChannelState::FundingSent as u32 |
9660 ChannelState::TheirChannelReady as u32,
9662 assert!(node_a_chan.check_get_channel_ready(0).is_some());