// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
/// There are a few "states" and then a number of flags which can be applied:
/// We first move through init with `OurInitSent` -> `TheirInitSent` -> `FundingCreated` -> `FundingSent`.
/// `TheirChannelReady` and `OurChannelReady` then get set on `FundingSent`, and when both are set we
/// move on to `ChannelReady`.
/// Note that `PeerDisconnected` can be set on both `ChannelReady` and `FundingSent`.
/// `ChannelReady` can then get all remaining flags set on it, until we finish shutdown, then we
/// move on to `ShutdownComplete`, at which point most calls into this channel are disallowed.
enum ChannelState {
	/// Implies we have (or are prepared to) send our open_channel/accept_channel message
	OurInitSent = 1 << 0,
	/// Implies we have received their `open_channel`/`accept_channel` message
	TheirInitSent = 1 << 1,
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to `FundingSent`.
	/// Note that this is nonsense for an inbound channel as we immediately generate `funding_signed`
	/// upon receipt of `funding_created`, so simply skip this state.
	FundingCreated = 4,
	/// Set when we have received/sent `funding_created` and `funding_signed` and are thus now waiting
	/// on the funding transaction to confirm. The `ChannelReady` flags are set to indicate when we
	/// and our counterparty consider the funding transaction confirmed.
	FundingSent = 8,
	/// Flag which can be set on `FundingSent` to indicate they sent us a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	TheirChannelReady = 1 << 4,
	/// Flag which can be set on `FundingSent` to indicate we sent them a `channel_ready` message.
	/// Once both `TheirChannelReady` and `OurChannelReady` are set, state moves on to `ChannelReady`.
	OurChannelReady = 1 << 5,
	ChannelReady = 64,
	/// Flag which is set on `ChannelReady` and `FundingSent` indicating remote side is considered
	/// "disconnected" and no updates are allowed until after we've done a `channel_reestablish`
	/// dance.
	PeerDisconnected = 1 << 7,
	/// Flag which is set on `ChannelReady`, `FundingCreated`, and `FundingSent` indicating the user has
	/// told us a `ChannelMonitor` update is pending async persistence somewhere and we should pause
	/// sending any outbound messages until they've managed to finish.
	MonitorUpdateInProgress = 1 << 8,
	/// Flag which implies that we have sent a commitment_signed but are awaiting the responding
	/// revoke_and_ack message. During this time period, we can't generate new commitment_signed
	/// messages as then we will be unable to determine which HTLCs they included in their
	/// revoke_and_ack implicit ACK, so instead we have to hold them away temporarily to be sent
	/// later.
	/// Flag is set on `ChannelReady`.
	AwaitingRemoteRevoke = 1 << 9,
	/// Flag which is set on `ChannelReady` or `FundingSent` after receiving a shutdown message from
	/// the remote end. If set, they may not add any new HTLCs to the channel, and we are expected
	/// to respond with our own shutdown message when possible.
	RemoteShutdownSent = 1 << 10,
	/// Flag which is set on `ChannelReady` or `FundingSent` after sending a shutdown message. At this
	/// point, we may not add any new HTLCs to the channel.
	LocalShutdownSent = 1 << 11,
	/// We've successfully negotiated a closing_signed dance. At this point ChannelManager is about
	/// to drop us, but we store this anyway.
	ShutdownComplete = 4096,
	/// Flag which is set on `FundingSent` to indicate this channel is funded in a batch and the
	/// broadcasting of the funding transaction is being held until all channels in the batch
	/// have received funding_signed and have their monitors persisted.
	WaitingForBatch = 1 << 13,
}
const BOTH_SIDES_SHUTDOWN_MASK: u32 =
	ChannelState::LocalShutdownSent as u32 |
	ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 =
	BOTH_SIDES_SHUTDOWN_MASK |
	ChannelState::PeerDisconnected as u32 |
	ChannelState::MonitorUpdateInProgress as u32;
const STATE_FLAGS: u32 =
	MULTI_STATE_FLAGS |
	ChannelState::TheirChannelReady as u32 |
	ChannelState::OurChannelReady as u32 |
	ChannelState::AwaitingRemoteRevoke as u32 |
	ChannelState::WaitingForBatch as u32;
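
// A minimal sketch (hypothetical helper, not part of LDK's API): the masks
// above reduce common state queries to simple bit-twiddling, e.g. stripping
// all flag bits off of `channel_state` to recover the base state, or testing a
// whole group of flags at once.
#[cfg(test)]
fn example_channel_state_masks(channel_state: u32) -> (u32, bool) {
	// The base state (e.g. `ChannelState::ChannelReady`), with all flag bits removed...
	let base_state = channel_state & !STATE_FLAGS;
	// ...and whether either side has sent a `shutdown` message.
	let either_side_shutdown = channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0;
	(base_state, either_side_shutdown)
}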
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
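
// A small sketch (hypothetical helper): per BOLT #3, the expected total weight
// of a commitment transaction carrying `num_htlcs` non-dust HTLCs is the base
// weight above plus a fixed per-HTLC weight.
#[cfg(test)]
fn example_commitment_tx_weight(channel_type_features: &ChannelTypeFeatures, num_htlcs: u64) -> u64 {
	commitment_tx_base_weight(channel_type_features) + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC
}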
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
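
// Worked sketch (hypothetical helper): under the legacy default a 1_000_000
// sat channel advertises 1_000_000 * 1000 * 10 / 100 = 100_000_000 msat of
// allowed in-flight HTLC value.
#[cfg(test)]
fn example_legacy_max_in_flight_msat(channel_value_satoshis: u64) -> u64 {
	channel_value_satoshis * 1000 * MAX_IN_FLIGHT_PERCENT_LEGACY as u64 / 100
}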
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate used to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// a description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub funding_created: Option<msgs::FundingCreated>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
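
// A minimal sketch (hypothetical helper): when we are the channel initiator,
// we budget the commitment fee for a new HTLC as if the current feerate had
// already jumped by this multiple.
#[cfg(test)]
fn example_fee_spike_buffered_feerate(feerate_per_kw: u32) -> u64 {
	feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE
}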
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
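// Worked example using the numbers above: with one tick per minute and ~300 s
// of average gossip convergence, 300 s / 60 s per tick = 5 ticks.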
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` before the peer has funded
	/// the channel, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
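
// A minimal usage sketch (hypothetical test, assuming the manager's timer fires
// once per minute): after `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` calls the channel
// is reported as expired and would be force-closed and purged.
#[cfg(test)]
fn example_unfunded_channel_expiry() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	let mut expired = false;
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS {
		expired = ctx.should_expire_unfunded_channel();
	}
	assert!(expired); // 60 ticks at one per minute is roughly an hour
}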
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: u32,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,
	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state & !STATE_FLAGS > (ChannelState::OurInitSent as u32)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		let mask = ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK;
		(self.channel_state & mask) == (ChannelState::ChannelReady as u32) && !self.monitor_pending_channel_ready
	}
	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		if self.channel_state & (ChannelState::ShutdownComplete as u32) != 0 {
			return ChannelShutdownState::ShutdownComplete;
		}
		if self.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 && self.channel_state & (ChannelState::RemoteShutdownSent as u32) == 0 {
			return ChannelShutdownState::ShutdownInitiated;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && !self.closing_negotiation_ready() {
			return ChannelShutdownState::ResolvingHTLCs;
		}
		if (self.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0) && self.closing_negotiation_ready() {
			return ChannelShutdownState::NegotiatingClosingFee;
		}
		return ChannelShutdownState::NotShuttingDown;
	}

	fn closing_negotiation_ready(&self) -> bool {
		self.pending_inbound_htlcs.is_empty() &&
		self.pending_outbound_htlcs.is_empty() &&
		self.pending_update_fee.is_none() &&
		self.channel_state &
			(BOTH_SIDES_SHUTDOWN_MASK |
			 ChannelState::AwaitingRemoteRevoke as u32 |
			 ChannelState::PeerDisconnected as u32 |
			 ChannelState::MonitorUpdateInProgress as u32) == BOTH_SIDES_SHUTDOWN_MASK
	}
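
	// In words, `closing_negotiation_ready` above requires: no pending HTLCs or
	// fee updates, both sides have sent `shutdown`, and none of the
	// `AwaitingRemoteRevoke`/`PeerDisconnected`/`MonitorUpdateInProgress` flags
	// remain set.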
	/// Returns true if this channel is currently available for use. This is stricter than
	/// is_usable() and additionally considers things like the channel being temporarily disabled.
	/// Allowed in any state (including after shutdown)
	pub fn is_live(&self) -> bool {
		self.is_usable() && (self.channel_state & (ChannelState::PeerDisconnected as u32) == 0)
	}

	// Public utilities:

	pub fn channel_id(&self) -> ChannelId {
		self.channel_id
	}

	// Return the `temporary_channel_id` used during channel establishment.
	//
	// Will return `None` for channels created prior to LDK version 0.0.115.
	pub fn temporary_channel_id(&self) -> Option<ChannelId> {
		self.temporary_channel_id
	}

	pub fn minimum_depth(&self) -> Option<u32> {
		self.minimum_depth
	}

	/// Gets the "user_id" value passed into the construction of this channel. It has no special
	/// meaning and exists only to allow users to have a persistent identifier of a channel.
	pub fn get_user_id(&self) -> u128 {
		self.user_id
	}

	/// Gets the channel's type
	pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
		&self.channel_type
	}

	/// Gets the channel's `short_channel_id`.
	///
	/// Will return `None` if the channel hasn't been confirmed yet.
	pub fn get_short_channel_id(&self) -> Option<u64> {
		self.short_channel_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
		self.latest_inbound_scid_alias
	}

	/// Allowed in any state (including after shutdown)
	pub fn outbound_scid_alias(&self) -> u64 {
		self.outbound_scid_alias
	}
	/// Returns the holder signer for this channel.
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		return &self.holder_signer
	}

	/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
	/// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
	/// or prior to any channel actions during `Channel` initialization.
	pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
		debug_assert_eq!(self.outbound_scid_alias, 0);
		self.outbound_scid_alias = outbound_scid_alias;
	}
	/// Returns the funding_txo we either got from our peer, or were given by
	/// get_funding_created.
	pub fn get_funding_txo(&self) -> Option<OutPoint> {
		self.channel_transaction_parameters.funding_outpoint
	}

	/// Returns the height in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
		let conf_height = self.funding_tx_confirmation_height;
		if conf_height > 0 {
			Some(conf_height)
		} else {
			None
		}
	}

	/// Returns the block hash in which our funding transaction was confirmed.
	pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
		self.funding_tx_confirmed_in
	}

	/// Returns the current number of confirmations on the funding transaction.
	pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
		if self.funding_tx_confirmation_height == 0 {
			// We either haven't seen any confirmation yet, or observed a reorg.
			return 0;
		}

		height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
	}
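
	// Worked example for the method above (hypothetical heights): if the funding
	// transaction confirmed at height 100, then at height 105 we return
	// 105 - 100 + 1 = 6 confirmations; at any height below 100 (a reorg) we
	// return 0.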
	fn get_holder_selected_contest_delay(&self) -> u16 {
		self.channel_transaction_parameters.holder_selected_contest_delay
	}

	fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.holder_pubkeys
	}

	pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
		self.channel_transaction_parameters.counterparty_parameters
			.as_ref().map(|params| params.selected_contest_delay)
	}

	fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
		&self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_node_id(&self) -> PublicKey {
		self.counterparty_node_id
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
		self.holder_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
	}
	/// Allowed in any state (including after shutdown)
	pub fn get_announced_htlc_max_msat(&self) -> u64 {
		return cmp::min(
			// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
			// to use full capacity. This is an effort to reduce routing failures, because in many cases
			// the channel might have been used to route very small values (either by honest users or as DoS).
			self.channel_value_satoshis * 1000 * 9 / 10,
			self.counterparty_max_htlc_value_in_flight_msat
		);
	}

	/// Allowed in any state (including after shutdown)
	pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
		self.counterparty_htlc_minimum_msat
	}

	/// Allowed in any state (including after shutdown), but will return none before TheirInitSent
	pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
		self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
	}
	fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
		self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
			let holder_reserve = self.holder_selected_channel_reserve_satoshis;
			cmp::min(
				(self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
				party_max_htlc_value_in_flight_msat
			)
		})
	}
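
	// Worked example for the method above (hypothetical values): with a
	// 1_000_000 sat channel, a 10_000 sat counterparty reserve, and a 10_000 sat
	// holder reserve, the capacity-based cap is
	// (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, which is then
	// clamped to the party's max_htlc_value_in_flight_msat.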
1242 pub fn get_value_satoshis(&self) -> u64 {
1243 self.channel_value_satoshis
1246 pub fn get_fee_proportional_millionths(&self) -> u32 {
1247 self.config.options.forwarding_fee_proportional_millionths
1250 pub fn get_cltv_expiry_delta(&self) -> u16 {
1251 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1254 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1255 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1256 where F::Target: FeeEstimator
1258 match self.config.options.max_dust_htlc_exposure {
1259 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1260 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1261 ConfirmationTarget::OnChainSweep) as u64;
1262 feerate_per_kw.saturating_mul(multiplier)
1264 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
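	// A minimal sketch of the FeeRateMultiplier arm above, with hypothetical
	// numbers: a 2_500 sat/kW estimate and a 10_000x multiplier yield a
	// 25_000_000 msat exposure cap.
	#[cfg(test)]
	fn example_dust_exposure_limit() {
		let feerate_per_kw: u64 = 2_500;
		let multiplier: u64 = 10_000;
		assert_eq!(feerate_per_kw.saturating_mul(multiplier), 25_000_000);
	}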
1268 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1269 pub fn prev_config(&self) -> Option<ChannelConfig> {
1270 self.prev_config.map(|prev_config| prev_config.0)
1273 // Checks whether we should emit a `ChannelPending` event.
1274 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1275 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1278 // Returns whether we already emitted a `ChannelPending` event.
1279 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1280 self.channel_pending_event_emitted
1283 // Remembers that we already emitted a `ChannelPending` event.
1284 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1285 self.channel_pending_event_emitted = true;
1288 // Checks whether we should emit a `ChannelReady` event.
1289 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1290 self.is_usable() && !self.channel_ready_event_emitted
1293 // Remembers that we already emitted a `ChannelReady` event.
1294 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1295 self.channel_ready_event_emitted = true;
1298 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1299 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1300 /// no longer be considered when forwarding HTLCs.
1301 pub fn maybe_expire_prev_config(&mut self) {
1302 if self.prev_config.is_none() {
1305 let prev_config = self.prev_config.as_mut().unwrap();
1307 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1308 self.prev_config = None;
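	// A minimal sketch of the tick-based expiry above; a (config, ticks_elapsed)
	// pair of stand-in types replaces the real (ChannelConfig, usize), and
	// EXPIRE_TICKS stands in for EXPIRE_PREV_CONFIG_TICKS.
	#[cfg(test)]
	fn example_prev_config_expiry() {
		const EXPIRE_TICKS: usize = 5;
		let mut prev_config: Option<(u32, usize)> = Some((42, 0));
		for _ in 0..EXPIRE_TICKS {
			if let Some(mut cfg) = prev_config {
				cfg.1 += 1;
				prev_config = if cfg.1 == EXPIRE_TICKS { None } else { Some(cfg) };
			}
		}
		assert!(prev_config.is_none());
	}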
1312 /// Returns the current [`ChannelConfig`] applied to the channel.
1313 pub fn config(&self) -> ChannelConfig {
1317 /// Updates the channel's config. Returns a bool indicating whether the applied config
1318 /// update requires us to broadcast a new ChannelUpdate message.
1319 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1320 let did_channel_update =
1321 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1322 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1323 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1324 if did_channel_update {
1325 self.prev_config = Some((self.config.options, 0));
1326 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1327 // policy change to propagate throughout the network.
1328 self.update_time_counter += 1;
1330 self.config.options = *config;
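	// A minimal sketch of the decision above: only the three forwarding-relevant
	// fields are compared when deciding whether a ChannelUpdate is needed. The
	// values below are hypothetical stand-ins for old and new config fields.
	#[cfg(test)]
	fn example_update_config_decision() {
		let (old_fee_ppm, old_base_msat, old_cltv_delta) = (0u32, 1_000u32, 72u16);
		let (new_fee_ppm, new_base_msat, new_cltv_delta) = (500u32, 1_000u32, 72u16);
		let did_channel_update = old_fee_ppm != new_fee_ppm
			|| old_base_msat != new_base_msat
			|| old_cltv_delta != new_cltv_delta;
		assert!(did_channel_update); // the proportional fee changed
	}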
1334 /// Returns true if funding_signed was sent/received and the
1335 /// funding transaction has been broadcast if necessary.
1336 pub fn is_funding_broadcast(&self) -> bool {
1337 self.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 &&
1338 self.channel_state & ChannelState::WaitingForBatch as u32 == 0
1341 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1342 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1343 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1344 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1345 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1347 /// @local is used only when converting internal structures which refer to remote vs local
1348 /// into concrete output values and HTLC directions.
1349 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1350 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1351 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1352 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1353 /// which peer generated this transaction and "to whom" this transaction flows.
1355 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1356 where L::Target: Logger
1358 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1359 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1360 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1362 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1363 let mut remote_htlc_total_msat = 0;
1364 let mut local_htlc_total_msat = 0;
1365 let mut value_to_self_msat_offset = 0;
1367 let mut feerate_per_kw = self.feerate_per_kw;
1368 if let Some((feerate, update_state)) = self.pending_update_fee {
1369 if match update_state {
1370 // Note that these match the inclusion criteria when scanning
1371 // pending_inbound_htlcs below.
1372 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1373 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1374 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1376 feerate_per_kw = feerate;
1380 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1381 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1382 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1384 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1386 macro_rules! get_htlc_in_commitment {
1387 ($htlc: expr, $offered: expr) => {
1388 HTLCOutputInCommitment {
1390 amount_msat: $htlc.amount_msat,
1391 cltv_expiry: $htlc.cltv_expiry,
1392 payment_hash: $htlc.payment_hash,
1393 transaction_output_index: None
1398 macro_rules! add_htlc_output {
1399 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1400 if $outbound == local { // "offered HTLC output"
1401 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1402 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1405 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1407 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1408 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1409 included_non_dust_htlcs.push((htlc_in_tx, $source));
1411 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1412 included_dust_htlcs.push((htlc_in_tx, $source));
1415 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1416 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1419 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1421 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1422 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1423 included_non_dust_htlcs.push((htlc_in_tx, $source));
1425 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1426 included_dust_htlcs.push((htlc_in_tx, $source));
1432 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1434 for ref htlc in self.pending_inbound_htlcs.iter() {
1435 let (include, state_name) = match htlc.state {
1436 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1437 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1438 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1439 InboundHTLCState::Committed => (true, "Committed"),
1440 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1444 add_htlc_output!(htlc, false, None, state_name);
1445 remote_htlc_total_msat += htlc.amount_msat;
1447 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1449 &InboundHTLCState::LocalRemoved(ref reason) => {
1450 if generated_by_local {
1451 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1452 inbound_htlc_preimages.push(preimage);
1453 value_to_self_msat_offset += htlc.amount_msat as i64;
1463 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1465 for ref htlc in self.pending_outbound_htlcs.iter() {
1466 let (include, state_name) = match htlc.state {
1467 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1468 OutboundHTLCState::Committed => (true, "Committed"),
1469 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1470 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1471 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1474 let preimage_opt = match htlc.state {
1475 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1476 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1477 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1481 if let Some(preimage) = preimage_opt {
1482 outbound_htlc_preimages.push(preimage);
1486 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1487 local_htlc_total_msat += htlc.amount_msat;
1489 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1491 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1492 value_to_self_msat_offset -= htlc.amount_msat as i64;
1494 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1495 if !generated_by_local {
1496 value_to_self_msat_offset -= htlc.amount_msat as i64;
1504 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1505 assert!(value_to_self_msat >= 0);
1506 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1507 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1508 // "violate" their reserve value by counting those against it. Thus, we have to convert
1509 // everything to i64 before subtracting as otherwise we can overflow.
1510 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1511 assert!(value_to_remote_msat >= 0);
1513 #[cfg(debug_assertions)]
1515 // Make sure that the to_self/to_remote is always either past the appropriate
1516 // channel_reserve *or* it is making progress towards it.
1517 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1518 self.holder_max_commitment_tx_output.lock().unwrap()
1520 self.counterparty_max_commitment_tx_output.lock().unwrap()
1522 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1523 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1524 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1525 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1528 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1529 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1530 let (value_to_self, value_to_remote) = if self.is_outbound() {
1531 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1533 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1536 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1537 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1538 let (funding_pubkey_a, funding_pubkey_b) = if local {
1539 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1541 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1544 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1545 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1550 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1551 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1556 let num_nondust_htlcs = included_non_dust_htlcs.len();
1558 let channel_parameters =
1559 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1560 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1561 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1568 &mut included_non_dust_htlcs,
1571 let mut htlcs_included = included_non_dust_htlcs;
1572 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1573 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1574 htlcs_included.append(&mut included_dust_htlcs);
1576 // For the stats, trim the balances to 0 msat when they fall below the broadcaster's dust limit.
1577 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1578 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1586 local_balance_msat: value_to_self_msat as u64,
1587 remote_balance_msat: value_to_remote_msat as u64,
1588 inbound_htlc_preimages,
1589 outbound_htlc_preimages,
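	// A minimal sketch of the dust classification used by add_htlc_output above:
	// an HTLC gets its own output only if its value, after paying the second-stage
	// HTLC tx fee (zero on anchor channels), clears the broadcaster's dust limit.
	// The weight below is a hypothetical non-anchor HTLC-timeout weight, not taken
	// from this file's constants.
	#[cfg(test)]
	fn example_htlc_dust_classification() {
		let feerate_per_kw: u64 = 1_000;
		let htlc_timeout_tx_weight: u64 = 663; // assumed non-anchor weight
		let broadcaster_dust_limit_satoshis: u64 = 546;
		let htlc_tx_fee = feerate_per_kw * htlc_timeout_tx_weight / 1000;
		let amount_msat: u64 = 1_300_000;
		// 1_300 sat >= 546 + 663 sat, so this offered HTLC is non-dust.
		assert!(amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee);
	}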
1594 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1595 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1596 /// our counterparty!)
1597 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1598 /// TODO Some magic rust shit to compile-time check this?
1599 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1600 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1601 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1602 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1603 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1605 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1609 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1610 /// will sign and send to our counterparty.
1611 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1612 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1613 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1614 //may see payments to it!
1615 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1616 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1617 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1619 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1622 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1623 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1624 /// Panics if called before accept_channel/InboundV1Channel::new
1625 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1626 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1629 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1630 &self.get_counterparty_pubkeys().funding_pubkey
1633 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1637 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1638 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1639 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1640 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1641 // more dust balance if the feerate increases when we have several HTLCs pending
1642 // which are near the dust limit.
1643 let mut feerate_per_kw = self.feerate_per_kw;
1644 // If there's a pending update fee, use it to ensure we aren't under-estimating
1645 // potential feerate updates coming soon.
1646 if let Some((feerate, _)) = self.pending_update_fee {
1647 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1649 if let Some(feerate) = outbound_feerate_update {
1650 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1652 cmp::max(2530, feerate_per_kw * 1250 / 1000)
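	// A minimal sketch of the buffering above: the result is the greater of
	// 2530 sat/kW and 125% of the current (or pending) feerate.
	#[cfg(test)]
	fn example_dust_buffer_feerate() {
		let feerate_per_kw: u32 = 4_000;
		assert_eq!(cmp::max(2530, feerate_per_kw * 1250 / 1000), 5_000);
		let low_feerate_per_kw: u32 = 1_000;
		assert_eq!(cmp::max(2530, low_feerate_per_kw * 1250 / 1000), 2530);
	}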
1655 /// Get forwarding information for the counterparty.
1656 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1657 self.counterparty_forwarding_info.clone()
1660 /// Returns an HTLCStats describing the pending inbound HTLCs.
1661 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1663 let mut stats = HTLCStats {
1664 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1665 pending_htlcs_value_msat: 0,
1666 on_counterparty_tx_dust_exposure_msat: 0,
1667 on_holder_tx_dust_exposure_msat: 0,
1668 holding_cell_msat: 0,
1669 on_holder_tx_holding_cell_htlcs_count: 0,
1672 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1675 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1676 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1677 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1679 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1680 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1681 for ref htlc in context.pending_inbound_htlcs.iter() {
1682 stats.pending_htlcs_value_msat += htlc.amount_msat;
1683 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1684 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1686 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1687 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
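	// A minimal sketch of the success-side dust threshold above, with hypothetical
	// non-anchor numbers (the 703 WU HTLC-success weight is assumed, not taken
	// from this file): an inbound HTLC below the effective threshold counts toward
	// our on-holder-tx dust exposure.
	#[cfg(test)]
	fn example_inbound_dust_exposure() {
		let dust_buffer_feerate: u64 = 5_000;
		let htlc_success_tx_weight: u64 = 703; // assumed non-anchor weight
		let holder_dust_limit_satoshis: u64 = 546;
		let holder_dust_limit_success_sat =
			dust_buffer_feerate * htlc_success_tx_weight / 1000 + holder_dust_limit_satoshis;
		let amount_msat: u64 = 3_000_000; // 3_000 sat
		assert!(amount_msat / 1000 < holder_dust_limit_success_sat); // 3_000 < 4_061
	}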
1693 /// Returns an HTLCStats describing the pending outbound HTLCs, *including* pending adds in our holding cell.
1694 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1696 let mut stats = HTLCStats {
1697 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1698 pending_htlcs_value_msat: 0,
1699 on_counterparty_tx_dust_exposure_msat: 0,
1700 on_holder_tx_dust_exposure_msat: 0,
1701 holding_cell_msat: 0,
1702 on_holder_tx_holding_cell_htlcs_count: 0,
1705 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1708 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1709 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1710 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1712 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1713 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1714 for ref htlc in context.pending_outbound_htlcs.iter() {
1715 stats.pending_htlcs_value_msat += htlc.amount_msat;
1716 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1717 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1719 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1720 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1724 for update in context.holding_cell_htlc_updates.iter() {
1725 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1726 stats.pending_htlcs += 1;
1727 stats.pending_htlcs_value_msat += amount_msat;
1728 stats.holding_cell_msat += amount_msat;
1729 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1730 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1732 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1733 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1735 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1742 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1743 /// Doesn't bother handling the
1744 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1745 /// corner case properly.
1746 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1747 -> AvailableBalances
1748 where F::Target: FeeEstimator
1750 let context = &self;
1751 // Note that we have to handle overflow due to the above case.
1752 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1753 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1755 let mut balance_msat = context.value_to_self_msat;
1756 for ref htlc in context.pending_inbound_htlcs.iter() {
1757 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1758 balance_msat += htlc.amount_msat;
1761 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1763 let outbound_capacity_msat = context.value_to_self_msat
1764 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1766 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1768 let mut available_capacity_msat = outbound_capacity_msat;
1770 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1771 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1775 if context.is_outbound() {
1776 // We should mind channel commit tx fee when computing how much of the available capacity
1777 // can be used in the next htlc. Mirrors the logic in send_htlc.
1779 // The fee depends on whether the amount we will be sending is above dust or not,
1780 // and the answer will in turn change the amount itself, making it a circular
1782 // This complicates the computation around dust-values, up to the one-htlc-value.
1783 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
1784 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1785 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
1788 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
1789 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
1790 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
1791 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
1792 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1793 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1794 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
1797 // We will first subtract the fee as if we were above-dust. Then, if the resulting
1798 // value ends up being below dust, we have this fee available again. In that case,
1799 // match the value to right-below-dust.
1800 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
1801 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
1802 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
1803 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
1804 debug_assert!(one_htlc_difference_msat != 0);
1805 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
1806 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
1807 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
1809 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
1812 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
1813 // sending a new HTLC won't reduce their balance below our reserve threshold.
1814 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
1815 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1816 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
1819 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
1820 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
1822 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
1823 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
1824 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
1826 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
1827 // If another HTLC's fee would reduce the remote's balance below the reserve limit
1828 // we've selected for them, we can only send dust HTLCs.
1829 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
1833 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
1835 // If we get close to our maximum dust exposure, we end up in a situation where we can send
1836 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
1837 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
1838 // send above the dust limit (as the router can always overpay to meet the dust limit).
1839 let mut remaining_msat_below_dust_exposure_limit = None;
1840 let mut dust_exposure_dust_limit_msat = 0;
1841 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
1843 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1844 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
1846 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
1847 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1848 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1850 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
1851 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1852 remaining_msat_below_dust_exposure_limit =
1853 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
1854 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
1857 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
1858 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
1859 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
1860 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
1861 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
1862 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
1865 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
1866 if available_capacity_msat < dust_exposure_dust_limit_msat {
1867 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
1869 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
1873 available_capacity_msat = cmp::min(available_capacity_msat,
1874 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
1876 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
1877 available_capacity_msat = 0;
1881 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
1882 - context.value_to_self_msat as i64
1883 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
1884 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
1886 outbound_capacity_msat,
1887 next_outbound_htlc_limit_msat: available_capacity_msat,
1888 next_outbound_htlc_minimum_msat,
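	// Two minimal sketches of clamps applied in get_available_balances above, with
	// hypothetical values throughout.
	//
	// First, the above-dust-first rule on an outbound channel: subtract the full
	// (above-dust) commitment fee, and if the remainder lands below the real dust
	// threshold, add back the one-HTLC fee difference and clamp to just below it.
	#[cfg(test)]
	fn example_next_htlc_capacity_near_dust() {
		let real_dust_limit_timeout_sat: i64 = 2_000;
		let max_reserved_commit_tx_fee_msat: i64 = 1_500_000;
		let min_reserved_commit_tx_fee_msat: i64 = 1_000_000;
		let available_capacity_msat: i64 = 3_000_000;
		let mut capacity = available_capacity_msat - max_reserved_commit_tx_fee_msat;
		if capacity < real_dust_limit_timeout_sat * 1000 {
			// The above-dust fee is freed again; match the value to right-below-dust.
			capacity += max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
			capacity = cmp::min(real_dust_limit_timeout_sat * 1000 - 1, capacity);
		}
		assert_eq!(capacity, 1_999_999);
	}

	// Second, the dust-exposure clamp: when near the exposure cap, a small next
	// HTLC is limited to the remaining dust headroom.
	#[cfg(test)]
	fn example_dust_exposure_capacity_clamp() {
		let dust_exposure_dust_limit_msat: u64 = 4_000_000;
		let remaining_limit_msat: u64 = 250_000;
		let mut available_capacity_msat: u64 = 1_000_000;
		if available_capacity_msat < dust_exposure_dust_limit_msat {
			available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
		}
		assert_eq!(available_capacity_msat, 250_000);
	}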
1893 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
1894 let context = &self;
1895 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
1898 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
1899 /// number of pending HTLCs that are on track to be in our next commitment tx.
1901 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
1902 /// `fee_spike_buffer_htlc` is `Some`.
1904 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
1905 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
1907 /// Dust HTLCs are excluded.
1908 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
1909 let context = &self;
1910 assert!(context.is_outbound());
1912 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1915 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
1916 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
1918 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1919 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1921 let mut addl_htlcs = 0;
1922 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
1924 HTLCInitiator::LocalOffered => {
1925 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
1929 HTLCInitiator::RemoteOffered => {
1930 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
1936 let mut included_htlcs = 0;
1937 for ref htlc in context.pending_inbound_htlcs.iter() {
1938 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
1941 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
1942 // transaction including this HTLC if it times out before they RAA.
1943 included_htlcs += 1;
1946 for ref htlc in context.pending_outbound_htlcs.iter() {
1947 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
1951 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
1952 OutboundHTLCState::Committed => included_htlcs += 1,
1953 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
1954 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
1955 // transaction won't be generated until they send us their next RAA, which will mean
1956 // dropping any HTLCs in this state.
1961 for htlc in context.holding_cell_htlc_updates.iter() {
1963 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
1964 if amount_msat / 1000 < real_dust_limit_timeout_sat {
1969 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
1970 // ack we're guaranteed to never include them in commitment txs anymore.
1974 let num_htlcs = included_htlcs + addl_htlcs;
1975 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
1976 #[cfg(any(test, fuzzing))]
1979 if fee_spike_buffer_htlc.is_some() {
1980 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
1982 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
1983 + context.holding_cell_htlc_updates.len();
1984 let commitment_tx_info = CommitmentTxInfoCached {
1986 total_pending_htlcs,
1987 next_holder_htlc_id: match htlc.origin {
1988 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
1989 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
1991 next_counterparty_htlc_id: match htlc.origin {
1992 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
1993 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
1995 feerate: context.feerate_per_kw,
1997 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
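	// A minimal sketch of the fee-spike buffer's effect, assuming hypothetical
	// non-anchor weights (724 WU base, 172 WU per HTLC) rather than this file's
	// constants: charging as though one extra non-dust HTLC were present reserves
	// exactly one HTLC's worth of additional fee.
	#[cfg(test)]
	fn example_fee_spike_buffer() {
		let feerate_per_kw: u64 = 1_000;
		let (base_weight, per_htlc): (u64, u64) = (724, 172); // assumed weights
		let fee_with_buffer = (base_weight + 3 * per_htlc) * feerate_per_kw / 1000 * 1000;
		let fee_without = (base_weight + 2 * per_htlc) * feerate_per_kw / 1000 * 1000;
		assert_eq!(fee_with_buffer - fee_without, 172_000); // one extra HTLC's fee, in msat
	}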
2002 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2003 /// pending HTLCs that are on track to be in their next commitment tx
2005 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2006 /// `fee_spike_buffer_htlc` is `Some`.
2008 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2009 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2011 /// Dust HTLCs are excluded.
2012 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2013 let context = &self;
2014 assert!(!context.is_outbound());
2016 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2019 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2020 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2022 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2023 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2025 let mut addl_htlcs = 0;
2026 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2028 HTLCInitiator::LocalOffered => {
2029 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2033 HTLCInitiator::RemoteOffered => {
2034 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2040 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2041 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2042 // committed outbound HTLCs, see below.
2043 let mut included_htlcs = 0;
2044 for ref htlc in context.pending_inbound_htlcs.iter() {
2045 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2048 included_htlcs += 1;
2051 for ref htlc in context.pending_outbound_htlcs.iter() {
2052 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2055 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2056 // i.e. if they've responded to us with an RAA after announcement.
2058 OutboundHTLCState::Committed => included_htlcs += 1,
2059 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2060 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2065 let num_htlcs = included_htlcs + addl_htlcs;
2066 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2067 #[cfg(any(test, fuzzing))]
2070 if fee_spike_buffer_htlc.is_some() {
2071 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2073 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2074 let commitment_tx_info = CommitmentTxInfoCached {
2076 total_pending_htlcs,
2077 next_holder_htlc_id: match htlc.origin {
2078 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2079 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2081 next_counterparty_htlc_id: match htlc.origin {
2082 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2083 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2085 feerate: context.feerate_per_kw,
2087 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2092 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2093 where F: Fn() -> Option<O> {
2094 if self.channel_state & ChannelState::FundingCreated as u32 != 0 ||
2095 self.channel_state & ChannelState::WaitingForBatch as u32 != 0 {
2102 /// Returns the transaction if there is a pending funding transaction that is yet to be
2104 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2105 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2108 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2110 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2111 self.if_unbroadcasted_funding(||
2112 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2116 /// Returns whether the channel is funded in a batch.
2117 pub fn is_batch_funding(&self) -> bool {
2118 self.is_batch_funding.is_some()
2121 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2123 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2124 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2127 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2128 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2129 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2130 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2131 /// immediately (others we will have to allow to time out).
2132 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2133 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2134 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2135 being fully configured in some cases. Thus, it's likely any monitor events we generate will
2136 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2137 assert!(self.channel_state != ChannelState::ShutdownComplete as u32);
2139 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2140 // return them to fail the payment.
2141 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2142 let counterparty_node_id = self.get_counterparty_node_id();
2143 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2145 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2146 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2151 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2152 // If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
2153 // returning a channel monitor update here would imply a channel monitor update before
2154 // we even registered the channel monitor to begin with, which is invalid.
2155 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2156 // funding transaction, don't return a funding txo (which prevents providing the
2157 // monitor update to the user, even if we return one).
2158 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2159 if self.channel_state & (ChannelState::FundingSent as u32 | ChannelState::ChannelReady as u32 | ChannelState::ShutdownComplete as u32) != 0 {
2160 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2161 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2162 update_id: self.latest_monitor_update_id,
2163 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2167 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2169 self.channel_state = ChannelState::ShutdownComplete as u32;
2170 self.update_time_counter += 1;
2173 dropped_outbound_htlcs,
2174 unbroadcasted_batch_funding_txid,
2175 channel_id: self.channel_id,
2176 counterparty_node_id: self.counterparty_node_id,
2180 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2181 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2182 let counterparty_keys = self.build_remote_transaction_keys();
2183 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2184 let signature = match &self.holder_signer {
2185 // TODO (taproot|arik): move match into calling method for Taproot
2186 ChannelSignerType::Ecdsa(ecdsa) => {
2187 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2188 .map(|(sig, _)| sig).ok()?
2190 // TODO (taproot|arik)
2195 if self.signer_pending_funding {
2196 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2197 self.signer_pending_funding = false;
2200 Some(msgs::FundingCreated {
2201 temporary_channel_id: self.temporary_channel_id.unwrap(),
2202 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2203 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2206 partial_signature_with_nonce: None,
2208 next_local_nonce: None,
2212 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2213 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2214 let counterparty_keys = self.build_remote_transaction_keys();
2215 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2217 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2218 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2219 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2220 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2222 match &self.holder_signer {
2223 // TODO (arik): move match into calling method for Taproot
2224 ChannelSignerType::Ecdsa(ecdsa) => {
2225 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2226 .map(|(signature, _)| msgs::FundingSigned {
2227 channel_id: self.channel_id(),
2230 partial_signature_with_nonce: None,
2234 if funding_signed.is_none() {
2235 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2236 self.signer_pending_funding = true;
2237 } else if self.signer_pending_funding {
2238 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2239 self.signer_pending_funding = false;
2242 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2243 (counterparty_initial_commitment_tx, funding_signed)
2245 // TODO (taproot|arik)
2252 // Internal utility functions for channels
2254 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2255 /// `channel_value_satoshis` in msat, set through
2256 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2258 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2260 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2261 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2262 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2264 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2267 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2269 channel_value_satoshis * 10 * configured_percent
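// A minimal sketch of the arithmetic above, with hypothetical values: the msat
// conversion (* 1000) combined with the percentage (/ 100) collapses to
// `* 10 * percent`.
#[cfg(test)]
fn example_max_in_flight() {
	let channel_value_satoshis: u64 = 1_000_000;
	let configured_percent: u64 = 10; // already clamped to [1, 100]
	assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000); // 10% in msat
}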
2272 /// Returns a minimum channel reserve value the remote needs to maintain,
2273 /// required by us according to the configured or default
2274 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2276 /// Guaranteed to return a value no larger than channel_value_satoshis
2278 /// This is used both for outbound and inbound channels and has lower bound
2279 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2280 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2281 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2282 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
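// A minimal sketch of the reserve formula above: a 1% (10_000 ppm) reserve on a
// 1_000_000 sat channel is 10_000 sat, floored at the minimum reserve and
// capped at the channel value. `MIN_RESERVE` below is a stand-in for
// MIN_THEIR_CHAN_RESERVE_SATOSHIS.
#[cfg(test)]
fn example_holder_selected_reserve() {
	const MIN_RESERVE: u64 = 1_000; // stand-in constant
	let channel_value_satoshis: u64 = 1_000_000;
	let proportional_millionths: u64 = 10_000;
	let calculated = channel_value_satoshis.saturating_mul(proportional_millionths) / 1_000_000;
	let reserve = cmp::min(channel_value_satoshis, cmp::max(calculated, MIN_RESERVE));
	assert_eq!(reserve, 10_000);
}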
2285 /// This is for legacy reasons, present for forward-compatibility.
2286 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2287 /// from storage. Hence, we use this function to not persist default values of
2288 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2289 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2290 let (q, _) = channel_value_satoshis.overflowing_div(100);
2291 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
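// A minimal sketch with a hypothetical channel value small enough that the 1%
// quotient falls below the 1000 sat floor.
#[cfg(test)]
fn example_legacy_default_reserve() {
	let channel_value_satoshis: u64 = 50_000;
	let q = channel_value_satoshis / 100; // 500, below the floor
	assert_eq!(cmp::min(channel_value_satoshis, cmp::max(q, 1000)), 1_000);
}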
2294 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2295 // Note that num_htlcs should not include dust HTLCs.
2297 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2298 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
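// A minimal sketch of the fee formula above, assuming hypothetical weights
// (724 WU non-anchor base, 172 WU per non-dust HTLC) rather than the constants
// defined elsewhere in this file.
#[cfg(test)]
fn example_commit_tx_fee_sat() {
	let feerate_per_kw: u64 = 1_000;
	let base_weight: u64 = 724; // assumed non-anchor commitment base weight
	let weight_per_htlc: u64 = 172;
	let fee_sat = feerate_per_kw * (base_weight + 2 * weight_per_htlc) / 1000;
	assert_eq!(fee_sat, 1_068);
}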
2301 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2302 // Note that num_htlcs should not include dust HTLCs.
2303 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2304 // Note that we need to divide before multiplying to round properly,
2305 // since the lowest denomination of bitcoin on-chain is the satoshi.
2306 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
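// A minimal sketch of the divide-before-multiply rounding above, with
// hypothetical numbers; the result is always a whole number of satoshis
// expressed in msat.
#[cfg(test)]
fn example_commit_tx_fee_msat_rounding() {
	let feerate_per_kw: u64 = 253;
	let weight: u64 = 724; // assumed commitment weight with no HTLCs
	let fee_msat = weight * feerate_per_kw / 1000 * 1000;
	assert_eq!(fee_msat, 183_000); // 724 * 253 = 183_172, truncated to whole sats
}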
2309 // Holder designates channel data owned for the benefit of the user client.
2310 // Counterparty designates channel data owned by the other channel participant.
2311 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2312 pub context: ChannelContext<SP>,
2315 #[cfg(any(test, fuzzing))]
2316 struct CommitmentTxInfoCached {
2318 total_pending_htlcs: usize,
2319 next_holder_htlc_id: u64,
2320 next_counterparty_htlc_id: u64,
2324 impl<SP: Deref> Channel<SP> where
2325 SP::Target: SignerProvider,
2326 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2328 fn check_remote_fee<F: Deref, L: Deref>(
2329 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2330 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2331 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2333 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2334 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2336 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2338 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2339 if feerate_per_kw < lower_limit {
2340 if let Some(cur_feerate) = cur_feerate_per_kw {
2341 if feerate_per_kw > cur_feerate {
2343 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2344 cur_feerate, feerate_per_kw);
2348 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
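	// A minimal sketch of the acceptance rule above, with hypothetical feerates:
	// a proposal below our lower bound is still accepted if it raises the feerate
	// we currently have, since refusing it could prevent closing the channel.
	#[cfg(test)]
	fn example_remote_fee_check() {
		let lower_limit: u32 = 1_000;
		let cur_feerate_per_kw: u32 = 500;
		let proposed: u32 = 750;
		let acceptable = proposed >= lower_limit || proposed > cur_feerate_per_kw;
		assert!(acceptable);
	}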
2354 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2355 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2356 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2357 // outside of those situations will fail.
2358 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2362 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2367 1 + // script length (0)
2371 )*4 + // * 4 for non-witness parts
2372 2 + // witness marker and flag
2373 1 + // witness element count
2374 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2375 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2376 2*(1 + 71); // two signatures + sighash type flags
2377 if let Some(spk) = a_scriptpubkey {
2378 ret += ((8+1) + // output values and script length
2379 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2381 if let Some(spk) = b_scriptpubkey {
2382 ret += ((8+1) + // output values and script length
2383 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
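	// A minimal sketch of the per-output term above: each closing-tx output
	// contributes (8-byte value + 1-byte script length + scriptpubkey length) * 4
	// weight units, so a 22-byte P2WPKH scriptpubkey adds 124 WU.
	#[cfg(test)]
	fn example_closing_output_weight() {
		let spk_len: u64 = 22; // P2WPKH scriptpubkey length
		assert_eq!((8 + 1 + spk_len) * 4, 124);
	}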
2389 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2390 assert!(self.context.pending_inbound_htlcs.is_empty());
2391 assert!(self.context.pending_outbound_htlcs.is_empty());
2392 assert!(self.context.pending_update_fee.is_none());
2394 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2395 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2396 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2398 if value_to_holder < 0 {
2399 assert!(self.context.is_outbound());
2400 total_fee_satoshis += (-value_to_holder) as u64;
2401 } else if value_to_counterparty < 0 {
2402 assert!(!self.context.is_outbound());
2403 total_fee_satoshis += (-value_to_counterparty) as u64;
2406 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2407 value_to_counterparty = 0;
2410 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2411 value_to_holder = 0;
2414 assert!(self.context.shutdown_scriptpubkey.is_some());
2415 let holder_shutdown_script = self.get_closing_scriptpubkey();
2416 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2417 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2419 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2420 (closing_transaction, total_fee_satoshis)
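	// A minimal sketch of the dust trimming above, with hypothetical values (in
	// the negative-balance case the shortfall is instead folded into
	// total_fee_satoshis, as done at the top of the function).
	#[cfg(test)]
	fn example_closing_dust_trim() {
		let holder_dust_limit_satoshis: u64 = 546;
		let mut value_to_counterparty: i64 = 500; // after the funder's fee deduction
		if value_to_counterparty as u64 <= holder_dust_limit_satoshis {
			value_to_counterparty = 0; // output would be dust, so it is dropped
		}
		assert_eq!(value_to_counterparty, 0);
	}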
2423 fn funding_outpoint(&self) -> OutPoint {
2424 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2427 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2430 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2431 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2433 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2435 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2436 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2437 where L::Target: Logger {
2438 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2439 // (see equivalent if condition there).
2440 assert!(self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0);
2441 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2442 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2443 self.context.latest_monitor_update_id = mon_update_id;
2444 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2445 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2449 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2450 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2451 // caller thought we could have something claimed (because we wouldn't have accepted an
2452 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2454 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2455 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2457 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2459 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2460 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2461 // these, but for now we just have to treat them as normal.
2463 let mut pending_idx = core::usize::MAX;
2464 let mut htlc_value_msat = 0;
2465 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2466 if htlc.htlc_id == htlc_id_arg {
2467 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2468 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2469 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2471 InboundHTLCState::Committed => {},
2472 InboundHTLCState::LocalRemoved(ref reason) => {
2473 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2475 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2476 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2478 return UpdateFulfillFetch::DuplicateClaim {};
2481 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2482 // Don't return in release mode here so that we can update channel_monitor
2486 htlc_value_msat = htlc.amount_msat;
2490 if pending_idx == core::usize::MAX {
2491 #[cfg(any(test, fuzzing))]
2492 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2493 // this is simply a duplicate claim, not previously failed and we lost funds.
2494 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2495 return UpdateFulfillFetch::DuplicateClaim {};
2498 // Now update local state:
2500 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2501 // can claim it even if the channel hits the chain before we see their next commitment.
2502 self.context.latest_monitor_update_id += 1;
2503 let monitor_update = ChannelMonitorUpdate {
2504 update_id: self.context.latest_monitor_update_id,
2505 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2506 payment_preimage: payment_preimage_arg.clone(),
2510 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2511 // Note that this condition is the same as the assertion in
2512 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2513 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2514 // do not get into this branch.
2515 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2516 match pending_update {
2517 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2518 if htlc_id_arg == htlc_id {
2519 // Make sure we don't leave latest_monitor_update_id incremented here:
2520 self.context.latest_monitor_update_id -= 1;
2521 #[cfg(any(test, fuzzing))]
2522 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2523 return UpdateFulfillFetch::DuplicateClaim {};
2526 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2527 if htlc_id_arg == htlc_id {
2528 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2529 // TODO: We may actually be able to switch to a fulfill here, though it's
2530 // rare enough that it may not be worth the complexity burden.
2531 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2532 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2538 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state);
2539 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2540 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2542 #[cfg(any(test, fuzzing))]
2543 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2544 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
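// (Note on the `msg: None` returns above: they signal to the caller that no
// update_fulfill_htlc can be sent right now; the claim sits in the holding cell for
// later replay, but the monitor update carrying the preimage must still be applied
// immediately.)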
2546 #[cfg(any(test, fuzzing))]
2547 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2550 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2551 if let InboundHTLCState::Committed = htlc.state {
2553 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2554 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2556 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2557 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2560 UpdateFulfillFetch::NewClaim {
2561 monitor_update,
2562 htlc_value_msat,
2563 msg: Some(msgs::UpdateFulfillHTLC {
2564 channel_id: self.context.channel_id(),
2565 htlc_id: htlc_id_arg,
2566 payment_preimage: payment_preimage_arg,
2571 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2572 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2573 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2574 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2575 // Even if we aren't supposed to let new monitor updates with commitment state
2576 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2577 // matter what. Sadly, to push a new monitor update which flies before others
2578 // already queued, we have to insert it into the pending queue and update the
2579 // update_ids of all the following monitors.
2580 if release_cs_monitor && msg.is_some() {
2581 let mut additional_update = self.build_commitment_no_status_check(logger);
2582 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2583 // to be strictly increasing by one, so decrement it here.
2584 self.context.latest_monitor_update_id = monitor_update.update_id;
2585 monitor_update.updates.append(&mut additional_update.updates);
2586 } else {
2587 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2588 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2589 monitor_update.update_id = new_mon_id;
2590 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2591 held_update.update.update_id += 1;
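// Worked example: with blocked updates carrying ids 5 and 6 and this preimage
// update initially assigned id 7, the preimage update is renumbered to 5 (the
// first blocked id) and the blocked updates are shifted to 6 and 7, keeping
// update_ids strictly increasing by one.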
2594 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2595 let update = self.build_commitment_no_status_check(logger);
2596 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2602 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2603 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2605 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2609 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2610 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2611 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2612 /// before we fail backwards.
2614 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2615 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2616 /// [`ChannelError::Ignore`].
2617 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2618 -> Result<(), ChannelError> where L::Target: Logger {
2619 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2620 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2623 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2624 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2625 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2626 /// before we fail backwards.
2628 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2629 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2630 /// [`ChannelError::Ignore`].
2631 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2632 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2633 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
2634 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2636 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
2638 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2639 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2640 // these, but for now we just have to treat them as normal.
2642 let mut pending_idx = core::usize::MAX;
2643 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2644 if htlc.htlc_id == htlc_id_arg {
2645 match htlc.state {
2646 InboundHTLCState::Committed => {},
2647 InboundHTLCState::LocalRemoved(ref reason) => {
2648 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2650 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2655 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2656 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2662 if pending_idx == core::usize::MAX {
2663 #[cfg(any(test, fuzzing))]
2664 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2665 // is simply a duplicate fail, not previously failed and we failed-back too early.
2666 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2667 return Ok(None);
2670 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
2671 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2672 force_holding_cell = true;
2675 // Now update local state:
2676 if force_holding_cell {
2677 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2678 match pending_update {
2679 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2680 if htlc_id_arg == htlc_id {
2681 #[cfg(any(test, fuzzing))]
2682 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2683 return Ok(None);
2686 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2687 if htlc_id_arg == htlc_id {
2688 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2689 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2695 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2696 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2697 htlc_id: htlc_id_arg,
2703 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2705 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2706 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2709 Ok(Some(msgs::UpdateFailHTLC {
2710 channel_id: self.context.channel_id(),
2711 htlc_id: htlc_id_arg,
2716 // Message handlers:
2718 /// Handles a funding_signed message from the remote end.
2719 /// If this call is successful, broadcast the funding transaction (and not before!)
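/// Note on ordering (a sketch of the expected caller flow, not enforced here): the
/// returned [`ChannelMonitor`] should be persisted before the funding transaction is
/// handed to the broadcaster, as the monitor is what lets us claim funds if the
/// channel later hits the chain.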
2720 pub fn funding_signed<L: Deref>(
2721 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2722 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2726 if !self.context.is_outbound() {
2727 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2729 if self.context.channel_state & !(ChannelState::MonitorUpdateInProgress as u32) != ChannelState::FundingCreated as u32 {
2730 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2732 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2733 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2734 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2735 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2738 let funding_script = self.context.get_funding_redeemscript();
2740 let counterparty_keys = self.context.build_remote_transaction_keys();
2741 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2742 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2743 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2745 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2746 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2748 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2749 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2751 let trusted_tx = initial_commitment_tx.trust();
2752 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2753 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2754 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2755 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2756 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2760 let holder_commitment_tx = HolderCommitmentTransaction::new(
2761 initial_commitment_tx,
2762 msg.signature,
2763 Vec::new(),
2764 &self.context.get_holder_pubkeys().funding_pubkey,
2765 self.context.counterparty_funding_pubkey()
2768 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2769 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
2772 let funding_redeemscript = self.context.get_funding_redeemscript();
2773 let funding_txo = self.context.get_funding_txo().unwrap();
2774 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
2775 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
2776 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
2777 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
2778 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
2779 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
2780 shutdown_script, self.context.get_holder_selected_contest_delay(),
2781 &self.context.destination_script, (funding_txo, funding_txo_script),
2782 &self.context.channel_transaction_parameters,
2783 funding_redeemscript.clone(), self.context.channel_value_satoshis,
2785 holder_commitment_tx, best_block, self.context.counterparty_node_id);
2786 channel_monitor.provide_initial_counterparty_commitment_tx(
2787 counterparty_initial_bitcoin_tx.txid, Vec::new(),
2788 self.context.cur_counterparty_commitment_transaction_number,
2789 self.context.counterparty_cur_commitment_point.unwrap(),
2790 counterparty_initial_commitment_tx.feerate_per_kw(),
2791 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
2792 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
2794 assert_eq!(self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32), 0); // We have not had any monitor(s) yet, so no update could have failed!
2795 if self.context.is_batch_funding() {
2796 self.context.channel_state = ChannelState::FundingSent as u32 | ChannelState::WaitingForBatch as u32;
2797 } else {
2798 self.context.channel_state = ChannelState::FundingSent as u32;
2800 self.context.cur_holder_commitment_transaction_number -= 1;
2801 self.context.cur_counterparty_commitment_transaction_number -= 1;
2803 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
2805 let need_channel_ready = self.check_get_channel_ready(0).is_some();
2806 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
2807 Ok(channel_monitor)
2810 /// Updates the state of the channel to indicate that all channels in the batch have received
2811 /// funding_signed and persisted their monitors.
2812 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2813 /// treated as a non-batch channel going forward.
2814 pub fn set_batch_ready(&mut self) {
2815 self.context.is_batch_funding = None;
2816 self.context.channel_state &= !(ChannelState::WaitingForBatch as u32);
2819 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2820 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2821 /// reply with.
2822 pub fn channel_ready<NS: Deref, L: Deref>(
2823 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2824 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2825 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2827 NS::Target: NodeSigner,
2830 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2831 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2832 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2835 if let Some(scid_alias) = msg.short_channel_id_alias {
2836 if Some(scid_alias) != self.context.short_channel_id {
2837 // The scid alias provided can be used to route payments *from* our counterparty,
2838 // i.e. can be used for inbound payments and provided in invoices, but is not used
2839 // when routing outbound payments.
2840 self.context.latest_inbound_scid_alias = Some(scid_alias);
2844 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
2846 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2847 // batch, but we can receive channel_ready messages.
2849 non_shutdown_state & ChannelState::OurChannelReady as u32 == 0 ||
2850 non_shutdown_state & ChannelState::WaitingForBatch as u32 == 0
2852 if non_shutdown_state & !(ChannelState::WaitingForBatch as u32) == ChannelState::FundingSent as u32 {
2853 self.context.channel_state |= ChannelState::TheirChannelReady as u32;
2854 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
2855 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
2856 self.context.update_time_counter += 1;
2857 } else if self.context.channel_state & (ChannelState::ChannelReady as u32) != 0 ||
2858 // If we reconnected before sending our `channel_ready` they may still resend theirs:
2859 (self.context.channel_state & (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) ==
2860 (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32))
2862 // They probably disconnected/reconnected and re-sent the channel_ready, which is
2863 // required, or they're sending a fresh SCID alias.
2864 let expected_point =
2865 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2866 // If they haven't ever sent an updated point, the point they send should match
2867 // the current one.
2868 self.context.counterparty_cur_commitment_point
2869 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2870 // If we've advanced the commitment number once, the second commitment point is
2871 // at `counterparty_prev_commitment_point`, which is not yet revoked.
2872 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2873 self.context.counterparty_prev_commitment_point
2875 // If they have sent updated points, channel_ready is always supposed to match
2876 // their "first" point, which we re-derive here.
2877 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2878 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2879 ).expect("We already advanced, so previous secret keys should have been validated already")))
2881 if expected_point != Some(msg.next_per_commitment_point) {
2882 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
2886 return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned()));
2889 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
2890 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
2892 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
2894 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
2897 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
2898 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
2899 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
2900 ) -> Result<(), ChannelError>
2901 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
2902 FE::Target: FeeEstimator, L::Target: Logger,
2904 // We can't accept HTLCs sent after we've sent a shutdown.
2905 let local_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::LocalShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2906 if local_sent_shutdown {
2907 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
2909 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
2910 let remote_sent_shutdown = (self.context.channel_state & (ChannelState::ChannelReady as u32 | ChannelState::RemoteShutdownSent as u32)) != (ChannelState::ChannelReady as u32);
2911 if remote_sent_shutdown {
2912 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
2914 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
2915 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
2917 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
2918 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
2920 if msg.amount_msat == 0 {
2921 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
2923 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
2924 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
2927 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
2928 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
2929 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
2930 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
2932 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
2933 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
2936 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
2937 // the reserve_satoshis we told them to always have as direct payment so that they lose
2938 // something if we punish them for broadcasting an old state).
2939 // Note that we don't really care about having a small/no to_remote output in our local
2940 // commitment transactions, as the purpose of the channel reserve is to ensure we can
2941 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
2942 // present in the next commitment transaction we send them (at least for fulfilled ones,
2943 // failed ones won't modify value_to_self).
2944 // Note that we will send HTLCs which another instance of rust-lightning would think
2945 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
2946 // Channel state once they will not be present in the next received commitment
2947 // transaction).
2948 let mut removed_outbound_total_msat = 0;
2949 for ref htlc in self.context.pending_outbound_htlcs.iter() {
2950 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
2951 removed_outbound_total_msat += htlc.amount_msat;
2952 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
2953 removed_outbound_total_msat += htlc.amount_msat;
2957 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
2958 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2959 (0, 0)
2960 } else {
2961 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
2962 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
2963 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
2965 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
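// Illustrative arithmetic (assumed example values, not constants asserted here): with a
// dust buffer feerate of 2,500 sat/kW and an HTLC-timeout transaction weight of 663 WU,
// htlc_timeout_dust_limit is 2500 * 663 / 1000 ~= 1,657 sats, so any HTLC below that
// plus the counterparty's dust limit counts toward dust exposure on their commitment tx.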
2966 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
2967 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
2968 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2969 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
2970 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2971 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2975 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
2976 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
2977 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
2978 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
2979 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
2980 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
2981 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
2985 let pending_value_to_self_msat =
2986 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
2987 let pending_remote_value_msat =
2988 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
2989 if pending_remote_value_msat < msg.amount_msat {
2990 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
2993 // Check that the remote can afford to pay for this HTLC on-chain at the current
2994 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
2996 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
2997 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
2998 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3000 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3001 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3002 } else {
3003 0
3004 };
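// (Both anchor outputs, at ANCHOR_OUTPUT_VALUE_SATOSHI = 330 sats each, are paid for by
// the funder on anchor channels, hence the 660-sat carve-out from the remote funder's
// spendable balance above.)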
3005 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3006 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3008 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3009 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3013 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3014 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3015 } else {
3016 0
3017 };
3018 if !self.context.is_outbound() {
3019 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3020 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3021 // side, only on the sender's. Note that with anchor outputs we are no longer as
3022 // sensitive to fee spikes, so we need to account for them less.
3023 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3024 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3025 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3026 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
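// FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE is 2 at the time of writing, i.e. on
// non-anchor channels we require the sender to retain headroom for a doubled
// commitment transaction fee.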
3028 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3029 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3030 // the HTLC, i.e. its status is already set to failing.
3031 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3032 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3035 // Check that they won't violate our local required channel reserve by adding this HTLC.
3036 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3037 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3038 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3039 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3042 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3043 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3045 if msg.cltv_expiry >= 500000000 {
3046 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3049 if self.context.channel_state & ChannelState::LocalShutdownSent as u32 != 0 {
3050 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3051 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3055 // Now update local state:
3056 self.context.next_counterparty_htlc_id += 1;
3057 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3058 htlc_id: msg.htlc_id,
3059 amount_msat: msg.amount_msat,
3060 payment_hash: msg.payment_hash,
3061 cltv_expiry: msg.cltv_expiry,
3062 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
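// (The HTLC just pushed enters the state machine at RemoteAnnounced; it only becomes
// irrevocably Committed after the subsequent commitment_signed and revoke_and_ack
// exchange completes.)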
3067 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3069 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3070 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3071 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3072 if htlc.htlc_id == htlc_id {
3073 let outcome = match check_preimage {
3074 None => fail_reason.into(),
3075 Some(payment_preimage) => {
3076 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3077 if payment_hash != htlc.payment_hash {
3078 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3080 OutboundHTLCOutcome::Success(Some(payment_preimage))
3081 }
3082 };
3083 match htlc.state {
3084 OutboundHTLCState::LocalAnnounced(_) =>
3085 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3086 OutboundHTLCState::Committed => {
3087 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3089 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3090 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3095 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3098 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3099 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3100 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3102 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3103 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3106 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3109 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3110 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3111 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3113 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3114 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3117 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3121 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3122 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3123 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3125 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3126 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3129 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3133 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3134 where L::Target: Logger
3136 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3137 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3139 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3140 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3142 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3143 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3146 let funding_script = self.context.get_funding_redeemscript();
3148 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3150 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3151 let commitment_txid = {
3152 let trusted_tx = commitment_stats.tx.trust();
3153 let bitcoin_tx = trusted_tx.built_transaction();
3154 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3156 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3157 log_bytes!(msg.signature.serialize_compact()[..]),
3158 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3159 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3160 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3161 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3165 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3167 // If our counterparty updated the channel fee in this commitment transaction, check that
3168 // they can actually afford the new fee now.
3169 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3170 update_state == FeeUpdateState::RemoteAnnounced
3171 } else { false };
3172 if update_fee {
3173 debug_assert!(!self.context.is_outbound());
3174 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3175 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3176 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3179 #[cfg(any(test, fuzzing))]
3181 if self.context.is_outbound() {
3182 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3183 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3184 if let Some(info) = projected_commit_tx_info {
3185 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3186 + self.context.holding_cell_htlc_updates.len();
3187 if info.total_pending_htlcs == total_pending_htlcs
3188 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3189 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3190 && info.feerate == self.context.feerate_per_kw {
3191 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3197 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3198 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3201 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3202 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3203 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3204 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3205 // backwards compatibility, we never use it in production. To provide test coverage, here,
3206 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3207 #[allow(unused_assignments, unused_mut)]
3208 let mut separate_nondust_htlc_sources = false;
3209 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3210 use core::hash::{BuildHasher, Hasher};
3211 // Get a random value using the only std API to do so - the DefaultHasher
3212 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3213 separate_nondust_htlc_sources = rand_val % 2 == 0;
3216 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3217 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3218 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3219 if let Some(_) = htlc.transaction_output_index {
3220 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3221 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3222 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3224 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3225 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
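// With anchor outputs, HTLC transactions are signed SINGLE|ANYONECANPAY so that
// additional inputs and outputs can later be attached for fee-bumping; without
// anchors the signature commits to the whole transaction via SIGHASH_ALL.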
3226 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3227 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3228 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3229 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3230 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3231 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3233 if !separate_nondust_htlc_sources {
3234 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3235 }
3236 } else {
3237 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3239 if separate_nondust_htlc_sources {
3240 if let Some(source) = source_opt.take() {
3241 nondust_htlc_sources.push(source);
3244 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3247 let holder_commitment_tx = HolderCommitmentTransaction::new(
3248 commitment_stats.tx,
3249 msg.signature,
3250 msg.htlc_signatures.clone(),
3251 &self.context.get_holder_pubkeys().funding_pubkey,
3252 self.context.counterparty_funding_pubkey()
3255 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3256 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3258 // Update state now that we've passed all the can-fail calls...
3259 let mut need_commitment = false;
3260 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3261 if *update_state == FeeUpdateState::RemoteAnnounced {
3262 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3263 need_commitment = true;
3267 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3268 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3269 Some(forward_info.clone())
3270 } else { None };
3271 if let Some(forward_info) = new_forward {
3272 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3273 &htlc.payment_hash, &self.context.channel_id);
3274 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3275 need_commitment = true;
3278 let mut claimed_htlcs = Vec::new();
3279 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3280 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3281 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3282 &htlc.payment_hash, &self.context.channel_id);
3283 // Grab the preimage, if it exists, instead of cloning
3284 let mut reason = OutboundHTLCOutcome::Success(None);
3285 mem::swap(outcome, &mut reason);
3286 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3287 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3288 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3289 // have a `Success(None)` reason. In this case we could forget some HTLC
3290 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3291 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3292 // claim.
3293 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3295 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3296 need_commitment = true;
3300 self.context.latest_monitor_update_id += 1;
3301 let mut monitor_update = ChannelMonitorUpdate {
3302 update_id: self.context.latest_monitor_update_id,
3303 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3304 commitment_tx: holder_commitment_tx,
3305 htlc_outputs: htlcs_and_sigs,
3306 claimed_htlcs,
3307 nondust_htlc_sources,
3311 self.context.cur_holder_commitment_transaction_number -= 1;
3312 self.context.expecting_peer_commitment_signed = false;
3313 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3314 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3315 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3317 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0 {
3318 // In case we initially failed monitor updating without requiring a response, we need
3319 // to make sure the RAA gets sent first.
3320 self.context.monitor_pending_revoke_and_ack = true;
3321 if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3322 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3323 // the corresponding HTLC status updates so that
3324 // get_last_commitment_update_for_send includes the right HTLCs.
3325 self.context.monitor_pending_commitment_signed = true;
3326 let mut additional_update = self.build_commitment_no_status_check(logger);
3327 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3328 // strictly increasing by one, so decrement it here.
3329 self.context.latest_monitor_update_id = monitor_update.update_id;
3330 monitor_update.updates.append(&mut additional_update.updates);
3332 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3333 &self.context.channel_id);
3334 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3337 let need_commitment_signed = if need_commitment && (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == 0 {
3338 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3339 // we'll send one right away when we get the revoke_and_ack when we
3340 // free_holding_cell_htlcs().
3341 let mut additional_update = self.build_commitment_no_status_check(logger);
3342 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3343 // strictly increasing by one, so decrement it here.
3344 self.context.latest_monitor_update_id = monitor_update.update_id;
3345 monitor_update.updates.append(&mut additional_update.updates);
3349 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3350 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3351 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3352 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3355 /// Public version of the below, checking relevant preconditions first.
3356 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3357 /// returns `(None, Vec::new())`.
3358 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3359 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3360 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3361 where F::Target: FeeEstimator, L::Target: Logger
3363 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 &&
3364 (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
3365 self.free_holding_cell_htlcs(fee_estimator, logger)
3366 } else { (None, Vec::new()) }
3369 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3370 /// for our counterparty.
3371 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3372 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3373 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3374 where F::Target: FeeEstimator, L::Target: Logger
3376 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, 0);
3377 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3378 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3379 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3381 let mut monitor_update = ChannelMonitorUpdate {
3382 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3383 updates: Vec::new(),
3386 let mut htlc_updates = Vec::new();
3387 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3388 let mut update_add_count = 0;
3389 let mut update_fulfill_count = 0;
3390 let mut update_fail_count = 0;
3391 let mut htlcs_to_fail = Vec::new();
3392 for htlc_update in htlc_updates.drain(..) {
3393 // Note that this *can* fail, though it should be due to rather-rare conditions on
3394 // fee races with adding too many outputs which push our total payments just over
3395 // the limit. In case it's less rare than I anticipate, we may want to revisit
3396 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3397 // to rebalance channels.
3398 match &htlc_update {
3399 &HTLCUpdateAwaitingACK::AddHTLC {
3400 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3401 skimmed_fee_msat, blinding_point, ..
3403 match self.send_htlc(
3404 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3405 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3407 Ok(_) => update_add_count += 1,
3410 ChannelError::Ignore(ref msg) => {
3411 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3412 // If we fail to send here, then this HTLC should
3413 // be failed backwards. Failing to send here
3414 // indicates that this HTLC may keep being put back
3415 // into the holding cell without ever being
3416 // successfully forwarded/failed/fulfilled, causing
3417 // our counterparty to eventually close on us.
3418 htlcs_to_fail.push((source.clone(), *payment_hash));
3421 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3427 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3428 // If an HTLC claim was previously added to the holding cell (via
3429 // `get_update_fulfill_htlc`, then generating the claim message itself must
3430 // not fail - any in between attempts to claim the HTLC will have resulted
3431 // in it hitting the holding cell again and we cannot change the state of a
3432 // holding cell HTLC from fulfill to anything else.
3433 let mut additional_monitor_update =
3434 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3435 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3436 { monitor_update } else { unreachable!() };
3437 update_fulfill_count += 1;
3438 monitor_update.updates.append(&mut additional_monitor_update.updates);
3440 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3441 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3442 Ok(update_fail_msg_option) => {
3443 // If an HTLC failure was previously added to the holding cell (via
3444 // `queue_fail_htlc`) then generating the fail message itself must
3445 // not fail - we should never end up in a state where we double-fail
3446 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3447 // for a full revocation before failing.
3448 debug_assert!(update_fail_msg_option.is_some());
3449 update_fail_count += 1;
3452 if let ChannelError::Ignore(_) = e {}
3453 else {
3454 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3461 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3462 return (None, htlcs_to_fail);
3464 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3465 self.send_update_fee(feerate, false, fee_estimator, logger)
3466 } else {
3467 None
3468 };
3470 let mut additional_update = self.build_commitment_no_status_check(logger);
3471 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3472 // but we want them to be strictly increasing by one, so reset it here.
3473 self.context.latest_monitor_update_id = monitor_update.update_id;
3474 monitor_update.updates.append(&mut additional_update.updates);
3476 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3477 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3478 update_add_count, update_fulfill_count, update_fail_count);
3480 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3481 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3487 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3488 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3489 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3490 /// generating an appropriate error *after* the channel state has been updated based on the
3491 /// revoke_and_ack message.
3492 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3493 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3494 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3495 where F::Target: FeeEstimator, L::Target: Logger,
3497 if (self.context.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
3498 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3500 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
3501 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3503 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK == BOTH_SIDES_SHUTDOWN_MASK && self.context.last_sent_closing_fee.is_some() {
3504 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3507 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3509 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3510 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3511 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3515 if self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 == 0 {
3516 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3517 // haven't given them a new commitment transaction to broadcast). We should probably
3518 // take advantage of this by updating our channel monitor, sending them an error, and
3519 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3520 // lot of work, and there's some chance this is all a misunderstanding anyway.
3521 // We have to do *something*, though, since our signer may get mad at us for otherwise
3522 // jumping a remote commitment number, so best to just force-close and move on.
3523 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3526 #[cfg(any(test, fuzzing))]
3528 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3529 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3532 match &self.context.holder_signer {
3533 ChannelSignerType::Ecdsa(ecdsa) => {
3534 ecdsa.validate_counterparty_revocation(
3535 self.context.cur_counterparty_commitment_transaction_number + 1,
3536 &secret
3537 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3539 // TODO (taproot|arik)
3544 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3545 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3546 self.context.latest_monitor_update_id += 1;
3547 let mut monitor_update = ChannelMonitorUpdate {
3548 update_id: self.context.latest_monitor_update_id,
3549 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3550 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3551 secret: msg.per_commitment_secret,
3555 // Update state now that we've passed all the can-fail calls...
3556 // (note that we may still fail to generate the new commitment_signed message, but that's
3557 // OK, we step the channel here and *then* if the new generation fails we can fail the
3558 // channel based on that, but stepping stuff here should be safe either way.)
3559 self.context.channel_state &= !(ChannelState::AwaitingRemoteRevoke as u32);
3560 self.context.sent_message_awaiting_response = None;
3561 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3562 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3563 self.context.cur_counterparty_commitment_transaction_number -= 1;
3565 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3566 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3569 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3570 let mut to_forward_infos = Vec::new();
3571 let mut revoked_htlcs = Vec::new();
3572 let mut finalized_claimed_htlcs = Vec::new();
3573 let mut update_fail_htlcs = Vec::new();
3574 let mut update_fail_malformed_htlcs = Vec::new();
3575 let mut require_commitment = false;
3576 let mut value_to_self_msat_diff: i64 = 0;
3579 // Take references explicitly so that we can hold multiple references to self.context.
3580 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3581 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3582 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3584 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3585 pending_inbound_htlcs.retain(|htlc| {
3586 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3587 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3588 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3589 value_to_self_msat_diff += htlc.amount_msat as i64;
3590 }
3591 *expecting_peer_commitment_signed = true;
3592 false
3593 } else { true }
3594 });
3595 pending_outbound_htlcs.retain(|htlc| {
3596 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3597 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3598 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3599 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3600 } else {
3601 finalized_claimed_htlcs.push(htlc.source.clone());
3602 // They fulfilled, so we sent them money
3603 value_to_self_msat_diff -= htlc.amount_msat as i64;
3604 }
3605 false
3606 } else { true }
3607 });
3608 for htlc in pending_inbound_htlcs.iter_mut() {
3609 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3610 true
3611 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3612 true
3613 } else { false };
3614 if swap {
3615 let mut state = InboundHTLCState::Committed;
3616 mem::swap(&mut state, &mut htlc.state);
3618 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3619 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3620 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3621 require_commitment = true;
3622 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3623 match forward_info {
3624 PendingHTLCStatus::Fail(fail_msg) => {
3625 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3626 require_commitment = true;
3627 match fail_msg {
3628 HTLCFailureMsg::Relay(msg) => {
3629 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3630 update_fail_htlcs.push(msg)
3632 HTLCFailureMsg::Malformed(msg) => {
3633 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3634 update_fail_malformed_htlcs.push(msg)
3638 PendingHTLCStatus::Forward(forward_info) => {
3639 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3640 to_forward_infos.push((forward_info, htlc.htlc_id));
3641 htlc.state = InboundHTLCState::Committed;
3647 for htlc in pending_outbound_htlcs.iter_mut() {
3648 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3649 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3650 htlc.state = OutboundHTLCState::Committed;
3651 *expecting_peer_commitment_signed = true;
3653 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3654 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3655 // Grab the preimage, if it exists, instead of cloning
3656 let mut reason = OutboundHTLCOutcome::Success(None);
3657 mem::swap(outcome, &mut reason);
3658 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3659 require_commitment = true;
3663 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3665 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3666 match update_state {
3667 FeeUpdateState::Outbound => {
3668 debug_assert!(self.context.is_outbound());
3669 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3670 self.context.feerate_per_kw = feerate;
3671 self.context.pending_update_fee = None;
3672 self.context.expecting_peer_commitment_signed = true;
3674 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3675 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3676 debug_assert!(!self.context.is_outbound());
3677 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3678 require_commitment = true;
3679 self.context.feerate_per_kw = feerate;
3680 self.context.pending_update_fee = None;
3685 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3686 let release_state_str =
3687 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3688 macro_rules! return_with_htlcs_to_fail {
3689 ($htlcs_to_fail: expr) => {
3690 if !release_monitor {
3691 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3692 update: monitor_update,
3693 });
3694 return Ok(($htlcs_to_fail, None));
3695 } else {
3696 return Ok(($htlcs_to_fail, Some(monitor_update)));
3701 if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 {
3702 // We can't actually generate a new commitment transaction (including by freeing holding
3703 // cells) while we can't update the monitor, so we just return what we have.
3704 if require_commitment {
3705 self.context.monitor_pending_commitment_signed = true;
3706 // When the monitor updating is restored we'll call
3707 // get_last_commitment_update_for_send(), which does not update state, but we're
3708 // definitely now awaiting a remote revoke before we can step forward any more, so
3709 // set it here.
3710 let mut additional_update = self.build_commitment_no_status_check(logger);
3711 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3712 // strictly increasing by one, so reset it here.
3713 self.context.latest_monitor_update_id = monitor_update.update_id;
3714 monitor_update.updates.append(&mut additional_update.updates);
3716 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3717 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3718 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3719 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3720 return_with_htlcs_to_fail!(Vec::new());
3723 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3724 (Some(mut additional_update), htlcs_to_fail) => {
3725 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3726 // strictly increasing by one, so reset it here.
3727 self.context.latest_monitor_update_id = monitor_update.update_id;
3728 monitor_update.updates.append(&mut additional_update.updates);
3730 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3731 &self.context.channel_id(), release_state_str);
3733 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3734 return_with_htlcs_to_fail!(htlcs_to_fail);
3736 (None, htlcs_to_fail) => {
3737 if require_commitment {
3738 let mut additional_update = self.build_commitment_no_status_check(logger);
3740 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3741 // strictly increasing by one, so reset it here.
3742 self.context.latest_monitor_update_id = monitor_update.update_id;
3743 monitor_update.updates.append(&mut additional_update.updates);
3745 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3746 &self.context.channel_id(),
3747 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3750 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3751 return_with_htlcs_to_fail!(htlcs_to_fail);
3753 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3754 &self.context.channel_id(), release_state_str);
3756 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3757 return_with_htlcs_to_fail!(htlcs_to_fail);
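// Illustrative sketch (self-contained, hypothetical types; not LDK's real structs) of the
// update-merging pattern used repeatedly above: when a handler produces a second monitor
// update back-to-back, its steps are folded into the first and latest_monitor_update_id is
// wound back so ids advance by exactly one per update actually handed to the ChannelManager:
//
//     struct Update { update_id: u64, updates: Vec<String> }
//
//     fn merge(base: &mut Update, mut extra: Update, latest_id: &mut u64) {
//         // Keep the first update's id; callers only ever see `base`.
//         *latest_id = base.update_id;
//         base.updates.append(&mut extra.updates);
//     }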
3763 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3764 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3765 /// commitment update.
3766 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3767 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3768 where F::Target: FeeEstimator, L::Target: Logger
3770 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3771 assert!(msg_opt.is_none(), "We forced holding cell?");
3774 /// Adds a pending update to this channel. See the doc for send_htlc for
3775 /// further details on the conditions under which the return value may be `None`.
3776 /// If our balance is too low to cover the cost of the next commitment transaction at the
3777 /// new feerate, the update is cancelled.
3779 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3780 /// [`Channel`] if `force_holding_cell` is false.
3781 fn send_update_fee<F: Deref, L: Deref>(
3782 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3783 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3784 ) -> Option<msgs::UpdateFee>
3785 where F::Target: FeeEstimator, L::Target: Logger
3787 if !self.context.is_outbound() {
3788 panic!("Cannot send fee from inbound channel");
3790 if !self.context.is_usable() {
3791 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3793 if !self.context.is_live() {
3794 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3797 // Before proposing a feerate update, check that we can actually afford the new fee.
3798 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3799 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3800 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3801 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3802 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3803 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3804 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3805 //TODO: auto-close after a number of failures?
3806 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3807 return None;
3808 }
3810 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust thresholds at the proposed `feerate_per_kw`.
3811 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3812 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3813 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3814 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3815 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3816 return None;
3817 }
3818 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3819 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3820 return None;
3821 }
3823 if (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0 {
3824 force_holding_cell = true;
3827 if force_holding_cell {
3828 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3829 return None;
3830 }
3832 debug_assert!(self.context.pending_update_fee.is_none());
3833 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3835 Some(msgs::UpdateFee {
3836 channel_id: self.context.channel_id,
3837 feerate_per_kw,
3838 })
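// Illustrative sketch (hypothetical helper using the BOLT 3 non-anchor weights) of the
// affordability check above: the holder's balance must cover the commitment fee at the new
// feerate for all non-dust HTLCs plus a buffer of extra HTLC slots, on top of the reserve:
//
//     const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
//     const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
//
//     fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize) -> u64 {
//         let weight = COMMITMENT_TX_BASE_WEIGHT + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
//         feerate_per_kw as u64 * weight / 1000
//     }
//
//     fn can_afford_feerate(balance_msat: u64, reserve_sat: u64, feerate_per_kw: u32, buffered_htlcs: usize) -> bool {
//         balance_msat >= commit_tx_fee_sat(feerate_per_kw, buffered_htlcs) * 1000 + reserve_sat * 1000
//     }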
3841 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3842 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3843 /// resent.
3844 /// No further message handling calls may be made until a channel_reestablish dance has
3845 /// completed.
3846 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3847 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3848 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
3849 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
3850 return Err(());
3851 }
3853 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == (ChannelState::PeerDisconnected as u32) {
3854 // While the below code should be idempotent, it's simpler to just return early, as
3855 // redundant disconnect events can fire, though they should be rare.
3856 return Ok(());
3857 }
3859 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3860 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3863 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3864 // will be retransmitted.
3865 self.context.last_sent_closing_fee = None;
3866 self.context.pending_counterparty_closing_signed = None;
3867 self.context.closing_fee_limits = None;
3869 let mut inbound_drop_count = 0;
3870 self.context.pending_inbound_htlcs.retain(|htlc| {
3871 match htlc.state {
3872 InboundHTLCState::RemoteAnnounced(_) => {
3873 // They sent us an update_add_htlc but we never got the commitment_signed.
3874 // We'll tell them what commitment_signed we're expecting next and they'll drop
3875 // this HTLC accordingly
3876 inbound_drop_count += 1;
3877 false
3878 },
3879 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3880 // We received a commitment_signed updating this HTLC and (at least hopefully)
3881 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
3882 // in response to it yet, so don't touch it.
3883 true
3884 },
3885 InboundHTLCState::Committed => true,
3886 InboundHTLCState::LocalRemoved(_) => {
3887 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
3888 // re-transmit if needed) and they may have even sent a revoke_and_ack back
3889 // (that we missed). Keep this around for now and if they tell us they missed
3890 // the commitment_signed we can re-transmit the update then.
3891 true
3892 },
3893 }
3894 });
3895 self.context.next_counterparty_htlc_id -= inbound_drop_count;
3897 if let Some((_, update_state)) = self.context.pending_update_fee {
3898 if update_state == FeeUpdateState::RemoteAnnounced {
3899 debug_assert!(!self.context.is_outbound());
3900 self.context.pending_update_fee = None;
3904 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3905 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
3906 // They sent us an update to remove this but haven't yet sent the corresponding
3907 // commitment_signed, we need to move it back to Committed and they can re-send
3908 // the update upon reconnection.
3909 htlc.state = OutboundHTLCState::Committed;
3913 self.context.sent_message_awaiting_response = None;
3915 self.context.channel_state |= ChannelState::PeerDisconnected as u32;
3916 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
3917 Ok(())
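// Illustrative sketch (standalone, with a hypothetical state enum) of the retain-based
// pruning above: only HTLCs the peer announced but never committed to are forgotten on
// disconnect; everything that made it into a signed commitment survives the reconnect:
//
//     #[derive(PartialEq)]
//     enum InboundState { RemoteAnnounced, AwaitingRaa, Committed, LocalRemoved }
//
//     fn prune_on_disconnect(htlcs: &mut Vec<InboundState>) -> usize {
//         let before = htlcs.len();
//         // RemoteAnnounced HTLCs were never in a signed commitment, so the peer will
//         // simply re-send them after the channel_reestablish dance.
//         htlcs.retain(|st| *st != InboundState::RemoteAnnounced);
//         before - htlcs.len()
//     }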
3920 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
3921 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
3922 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
3923 /// update completes (potentially immediately).
3924 /// The messages which were generated with the monitor update must *not* have been sent to the
3925 /// remote end, and must instead have been dropped. They will be regenerated when
3926 /// [`Self::monitor_updating_restored`] is called.
3928 /// [`ChannelManager`]: super::channelmanager::ChannelManager
3929 /// [`chain::Watch`]: crate::chain::Watch
3930 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
3931 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
3932 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
3933 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
3934 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
3936 self.context.monitor_pending_revoke_and_ack |= resend_raa;
3937 self.context.monitor_pending_commitment_signed |= resend_commitment;
3938 self.context.monitor_pending_channel_ready |= resend_channel_ready;
3939 self.context.monitor_pending_forwards.append(&mut pending_forwards);
3940 self.context.monitor_pending_failures.append(&mut pending_fails);
3941 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
3942 self.context.channel_state |= ChannelState::MonitorUpdateInProgress as u32;
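// Illustrative sketch (hypothetical flag values, not ChannelState itself) of the
// pause/restore bitflag pattern used here and in monitor_updating_restored below:
//
//     const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 0;
//
//     fn pause(state: &mut u32) { *state |= MONITOR_UPDATE_IN_PROGRESS; }
//     fn restore(state: &mut u32) { *state &= !MONITOR_UPDATE_IN_PROGRESS; }
//     fn is_paused(state: u32) -> bool { state & MONITOR_UPDATE_IN_PROGRESS != 0 }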
3945 /// Indicates that the latest ChannelMonitor update has been committed by the client
3946 /// successfully and we should restore normal operation. Returns messages which should be sent
3947 /// to the remote side.
3948 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
3949 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
3950 user_config: &UserConfig, best_block_height: u32
3951 ) -> MonitorRestoreUpdates
3954 NS::Target: NodeSigner
3956 assert_eq!(self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32, ChannelState::MonitorUpdateInProgress as u32);
3957 self.context.channel_state &= !(ChannelState::MonitorUpdateInProgress as u32);
3959 // If we're past (or at) the FundingSent stage on an outbound channel, try to
3960 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
3961 // first received the funding_signed.
3962 let mut funding_broadcastable =
3963 if self.context.is_outbound() && self.context.channel_state & !STATE_FLAGS >= ChannelState::FundingSent as u32 && self.context.channel_state & ChannelState::WaitingForBatch as u32 == 0 {
3964 self.context.funding_transaction.take()
3965 } else { None };
3966 // That said, if the funding transaction is already confirmed (ie we're active with a
3967 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
3968 if self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 && self.context.minimum_depth != Some(0) {
3969 funding_broadcastable = None;
3972 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
3973 // (and we assume the user never directly broadcasts the funding transaction and waits for
3974 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
3975 // * an inbound channel that failed to persist the monitor on funding_created and we got
3976 // the funding transaction confirmed before the monitor was persisted, or
3977 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
3978 let channel_ready = if self.context.monitor_pending_channel_ready {
3979 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
3980 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
3981 self.context.monitor_pending_channel_ready = false;
3982 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
3983 Some(msgs::ChannelReady {
3984 channel_id: self.context.channel_id(),
3985 next_per_commitment_point,
3986 short_channel_id_alias: Some(self.context.outbound_scid_alias),
3987 })
3988 } else { None };
3990 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
3992 let mut accepted_htlcs = Vec::new();
3993 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
3994 let mut failed_htlcs = Vec::new();
3995 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
3996 let mut finalized_claimed_htlcs = Vec::new();
3997 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
3999 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) != 0 {
4000 self.context.monitor_pending_revoke_and_ack = false;
4001 self.context.monitor_pending_commitment_signed = false;
4002 return MonitorRestoreUpdates {
4003 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4004 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4008 let raa = if self.context.monitor_pending_revoke_and_ack {
4009 Some(self.get_last_revoke_and_ack())
4010 } else { None };
4011 let commitment_update = if self.context.monitor_pending_commitment_signed {
4012 self.get_last_commitment_update_for_send(logger).ok()
4013 } else { None };
4014 if commitment_update.is_some() {
4015 self.mark_awaiting_response();
4018 self.context.monitor_pending_revoke_and_ack = false;
4019 self.context.monitor_pending_commitment_signed = false;
4020 let order = self.context.resend_order.clone();
4021 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4022 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4023 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4024 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4025 MonitorRestoreUpdates {
4026 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
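// Illustrative note: the mem::swap-with-empty-Vec drains above are equivalent to
// core::mem::take, which moves the pending items out and leaves an empty Vec behind
// (hypothetical variable name):
//
//     let accepted_htlcs = core::mem::take(&mut some_pending_forwards_vec);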
4030 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4031 where F::Target: FeeEstimator, L::Target: Logger
4033 if self.context.is_outbound() {
4034 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4036 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4037 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4039 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4041 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4042 self.context.update_time_counter += 1;
4043 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4044 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4045 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4046 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4047 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4048 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4049 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4050 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4051 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4052 msg.feerate_per_kw, holder_tx_dust_exposure)));
4054 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4055 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4056 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4057 }
4058 }
4059 Ok(())
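// Illustrative sketch (hypothetical helper; BOLT 3 second-stage HTLC transaction weights)
// of how an HTLC is judged trimmed-to-dust at a given feerate on a non-anchor commitment
// transaction, which is what drives the dust-exposure totals checked above:
//
//     const HTLC_TIMEOUT_TX_WEIGHT: u64 = 663;
//     const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;
//
//     fn is_dust(offered: bool, amount_msat: u64, dust_limit_sat: u64, feerate_per_kw: u32) -> bool {
//         let htlc_tx_weight = if offered { HTLC_TIMEOUT_TX_WEIGHT } else { HTLC_SUCCESS_TX_WEIGHT };
//         let htlc_tx_fee_sat = feerate_per_kw as u64 * htlc_tx_weight / 1000;
//         amount_msat / 1000 < dust_limit_sat + htlc_tx_fee_sat
//     }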
4062 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4063 /// blocked.
4065 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4066 let commitment_update = if self.context.signer_pending_commitment_update {
4067 self.get_last_commitment_update_for_send(logger).ok()
4068 } else { None };
4069 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4070 self.context.get_funding_signed_msg(logger).1
4071 } else { None };
4072 let channel_ready = if funding_signed.is_some() {
4073 self.check_get_channel_ready(0)
4074 } else { None };
4075 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4076 self.context.get_funding_created_msg(logger)
4077 } else { None };
4079 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4080 if commitment_update.is_some() { "a" } else { "no" },
4081 if funding_signed.is_some() { "a" } else { "no" },
4082 if funding_created.is_some() { "a" } else { "no" },
4083 if channel_ready.is_some() { "a" } else { "no" });
4085 SignerResumeUpdates {
4086 commitment_update,
4087 funding_signed,
4088 funding_created,
4089 channel_ready,
4090 }
4093 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4094 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4095 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4096 msgs::RevokeAndACK {
4097 channel_id: self.context.channel_id,
4098 per_commitment_secret,
4099 next_per_commitment_point,
4101 next_local_nonce: None,
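// Illustrative sketch: commitment transaction "numbers" count down from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1), which is why the secret released above sits two
// steps behind the next point we hand out (hypothetical helper):
//
//     const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
//
//     // 0 for the first commitment transaction, 1 for the second, and so on.
//     fn commitment_index(transaction_number: u64) -> u64 {
//         INITIAL_COMMITMENT_NUMBER - transaction_number
//     }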
4105 /// Gets the last commitment update for immediate sending to our peer.
4106 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4107 let mut update_add_htlcs = Vec::new();
4108 let mut update_fulfill_htlcs = Vec::new();
4109 let mut update_fail_htlcs = Vec::new();
4110 let mut update_fail_malformed_htlcs = Vec::new();
4112 for htlc in self.context.pending_outbound_htlcs.iter() {
4113 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4114 update_add_htlcs.push(msgs::UpdateAddHTLC {
4115 channel_id: self.context.channel_id(),
4116 htlc_id: htlc.htlc_id,
4117 amount_msat: htlc.amount_msat,
4118 payment_hash: htlc.payment_hash,
4119 cltv_expiry: htlc.cltv_expiry,
4120 onion_routing_packet: (**onion_packet).clone(),
4121 skimmed_fee_msat: htlc.skimmed_fee_msat,
4122 blinding_point: htlc.blinding_point,
4127 for htlc in self.context.pending_inbound_htlcs.iter() {
4128 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4129 match reason {
4130 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4131 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4132 channel_id: self.context.channel_id(),
4133 htlc_id: htlc.htlc_id,
4134 reason: err_packet.clone()
4137 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4138 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4139 channel_id: self.context.channel_id(),
4140 htlc_id: htlc.htlc_id,
4141 sha256_of_onion: sha256_of_onion.clone(),
4142 failure_code: failure_code.clone(),
4145 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4146 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4147 channel_id: self.context.channel_id(),
4148 htlc_id: htlc.htlc_id,
4149 payment_preimage: payment_preimage.clone(),
4156 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4157 Some(msgs::UpdateFee {
4158 channel_id: self.context.channel_id(),
4159 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4160 })
4161 } else { None };
4163 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4164 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4165 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4166 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4167 if self.context.signer_pending_commitment_update {
4168 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4169 self.context.signer_pending_commitment_update = false;
4170 }
4171 update
4172 } else {
4173 if !self.context.signer_pending_commitment_update {
4174 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4175 self.context.signer_pending_commitment_update = true;
4176 }
4177 return Err(());
4178 };
4179 Ok(msgs::CommitmentUpdate {
4180 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4181 commitment_signed,
4182 })
4185 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4186 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4187 if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
4188 assert!(self.context.shutdown_scriptpubkey.is_some());
4189 Some(msgs::Shutdown {
4190 channel_id: self.context.channel_id,
4191 scriptpubkey: self.get_closing_scriptpubkey(),
4192 })
4193 } else { None }
4196 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4197 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4199 /// Some links printed in log lines are included here to check them during build (when run with
4200 /// `cargo doc --document-private-items`):
4201 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4202 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4203 pub fn channel_reestablish<L: Deref, NS: Deref>(
4204 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4205 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4206 ) -> Result<ReestablishResponses, ChannelError>
4209 NS::Target: NodeSigner
4211 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
4212 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4213 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4214 // just close here instead of trying to recover.
4215 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4218 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4219 msg.next_local_commitment_number == 0 {
4220 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4223 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4224 if msg.next_remote_commitment_number > 0 {
4225 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4226 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4227 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4228 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4229 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4231 if msg.next_remote_commitment_number > our_commitment_transaction {
4232 macro_rules! log_and_panic {
4233 ($err_msg: expr) => {
4234 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4235 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4238 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4239 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4240 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4241 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4242 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4243 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4244 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4245 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4249 // Before we change the state of the channel, we check if the peer is sending a very old
4250 // commitment transaction number; if so, we send a warning message.
4251 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4252 return Err(ChannelError::Warn(format!(
4253 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4254 msg.next_remote_commitment_number,
4255 our_commitment_transaction
4259 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4260 // remaining cases either succeed or ErrorMessage-fail).
4261 self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
4262 self.context.sent_message_awaiting_response = None;
4264 let shutdown_msg = self.get_outbound_shutdown();
4266 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4268 if self.context.channel_state & (ChannelState::FundingSent as u32) == ChannelState::FundingSent as u32 {
4269 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4270 if self.context.channel_state & (ChannelState::OurChannelReady as u32) == 0 ||
4271 self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4272 if msg.next_remote_commitment_number != 0 {
4273 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4275 // Short circuit the whole handler as there is nothing we can resend them
4276 return Ok(ReestablishResponses {
4277 channel_ready: None,
4278 raa: None, commitment_update: None,
4279 order: RAACommitmentOrder::CommitmentFirst,
4280 shutdown_msg, announcement_sigs,
4284 // We have OurChannelReady set!
4285 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4286 return Ok(ReestablishResponses {
4287 channel_ready: Some(msgs::ChannelReady {
4288 channel_id: self.context.channel_id(),
4289 next_per_commitment_point,
4290 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4292 raa: None, commitment_update: None,
4293 order: RAACommitmentOrder::CommitmentFirst,
4294 shutdown_msg, announcement_sigs,
4298 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4299 // Remote isn't waiting on any RevokeAndACK from us!
4300 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4301 None
4302 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4303 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4304 self.context.monitor_pending_revoke_and_ack = true;
4305 None
4306 } else {
4307 Some(self.get_last_revoke_and_ack())
4308 }
4309 } else {
4310 debug_assert!(false, "All values should have been handled in the four cases above");
4311 return Err(ChannelError::Close(format!(
4312 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4313 msg.next_remote_commitment_number,
4314 our_commitment_transaction
4318 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4319 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4320 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4321 // the corresponding revoke_and_ack back yet.
4322 let is_awaiting_remote_revoke = self.context.channel_state & ChannelState::AwaitingRemoteRevoke as u32 != 0;
4323 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4324 self.mark_awaiting_response();
4326 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4328 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4329 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4330 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4331 Some(msgs::ChannelReady {
4332 channel_id: self.context.channel_id(),
4333 next_per_commitment_point,
4334 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4335 })
4336 } else { None };
4338 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4339 if required_revoke.is_some() {
4340 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4342 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4345 Ok(ReestablishResponses {
4346 channel_ready, shutdown_msg, announcement_sigs,
4347 raa: required_revoke,
4348 commitment_update: None,
4349 order: self.context.resend_order.clone(),
4351 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4352 if required_revoke.is_some() {
4353 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4355 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4358 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) != 0 {
4359 self.context.monitor_pending_commitment_signed = true;
4360 Ok(ReestablishResponses {
4361 channel_ready, shutdown_msg, announcement_sigs,
4362 commitment_update: None, raa: None,
4363 order: self.context.resend_order.clone(),
4366 Ok(ReestablishResponses {
4367 channel_ready, shutdown_msg, announcement_sigs,
4368 raa: required_revoke,
4369 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4370 order: self.context.resend_order.clone(),
4373 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4374 Err(ChannelError::Close(format!(
4375 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4376 msg.next_local_commitment_number,
4377 next_counterparty_commitment_number,
4380 Err(ChannelError::Close(format!(
4381 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4382 msg.next_local_commitment_number,
4383 next_counterparty_commitment_number,
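// Illustrative sketch (simplified, standalone) of the reconciliation performed above: given
// the peer's claimed next commitment number and our own view of the next number we will send,
// we either do nothing, re-send the lost commitment_signed, or fail the channel:
//
//     enum Action { InSync, ResendCommitment, CloseVeryOld, CloseFuture }
//
//     fn reconcile(peer_next: u64, our_next: u64) -> Action {
//         if peer_next == our_next { Action::InSync }
//         else if peer_next + 1 == our_next { Action::ResendCommitment }
//         else if peer_next < our_next { Action::CloseVeryOld }
//         else { Action::CloseFuture }
//     }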
4388 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4389 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4390 /// at which point they will be recalculated.
4391 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4392 -> (u64, u64)
4393 where F::Target: FeeEstimator
4395 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4397 // Propose a range from our current Background feerate to our Normal feerate plus our
4398 // force_close_avoidance_max_fee_satoshis.
4399 // If we fail to come to consensus, we'll have to force-close.
4400 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4401 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4402 // that we don't expect to need fee bumping
4403 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4404 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4406 // The spec requires that (when the channel does not have anchors) we only send absolute
4407 // channel fees no greater than the absolute channel fee on the current commitment
4408 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4409 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4410 // some force-closure by old nodes, but we wanted to close the channel anyway.
4412 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4413 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4414 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4415 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4418 // Note that technically we could end up with a lower minimum fee if one side's balance is
4419 // below our dust limit, causing the output to disappear. We don't bother handling this
4420 // case, however, as this should only happen if a channel is closed before any (material)
4421 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4422 // come to consensus with our counterparty on appropriate fees, however it should be a
4423 // relatively rare case. We can revisit this later, though note that in order to determine
4424 // if the funder's output is dust we have to know the absolute fee we're going to use.
4425 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4426 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4427 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4428 // We always add force_close_avoidance_max_fee_satoshis to our normal
4429 // feerate-calculated fee, but allow the max to be overridden if we're using a
4430 // target feerate-calculated fee.
4431 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4432 proposed_max_feerate as u64 * tx_weight / 1000)
4433 } else {
4434 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4437 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4438 self.context.closing_fee_limits.clone().unwrap()
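// Illustrative sketch (hypothetical numbers) of the fee arithmetic above: an absolute closing
// fee is the feerate (in sats per 1000 weight units) times the closing transaction weight,
// divided by 1000:
//
//     fn closing_fee_sat(feerate_per_kw: u64, tx_weight: u64) -> u64 {
//         feerate_per_kw * tx_weight / 1000
//     }
//
//     // e.g. at 2_500 sat/kW and a 600-weight closing tx: 2_500 * 600 / 1000 = 1_500 sats.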
4441 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4442 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4443 /// this point if we're the funder we should send the initial closing_signed, and in any case
4444 /// shutdown should complete within a reasonable timeframe.
4445 fn closing_negotiation_ready(&self) -> bool {
4446 self.context.closing_negotiation_ready()
4449 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4450 /// an Err if no progress is being made and the channel should be force-closed instead.
4451 /// Should be called on a one-minute timer.
4452 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4453 if self.closing_negotiation_ready() {
4454 if self.context.closing_signed_in_flight {
4455 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4456 } else {
4457 self.context.closing_signed_in_flight = true;
4458 }
4459 }
4460 Ok(())
4463 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4464 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4465 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4466 where F::Target: FeeEstimator, L::Target: Logger
4468 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4469 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4470 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4471 // that closing_negotiation_ready checks this case (as well as a few others).
4472 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4473 return Ok((None, None, None));
4476 if !self.context.is_outbound() {
4477 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4478 return self.closing_signed(fee_estimator, &msg);
4480 return Ok((None, None, None));
4483 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4484 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4485 if self.context.expecting_peer_commitment_signed {
4486 return Ok((None, None, None));
4489 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4491 assert!(self.context.shutdown_scriptpubkey.is_some());
4492 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4493 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4494 our_min_fee, our_max_fee, total_fee_satoshis);
4496 match &self.context.holder_signer {
4497 ChannelSignerType::Ecdsa(ecdsa) => {
4498 let sig = ecdsa
4499 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4500 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4502 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4503 Ok((Some(msgs::ClosingSigned {
4504 channel_id: self.context.channel_id,
4505 fee_satoshis: total_fee_satoshis,
4506 signature: sig,
4507 fee_range: Some(msgs::ClosingSignedFeeRange {
4508 min_fee_satoshis: our_min_fee,
4509 max_fee_satoshis: our_max_fee,
4513 // TODO (taproot|arik)
4519 // Marks a channel as waiting for a response from the counterparty. If it's not received
4520 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4521 // a reconnection.
4522 fn mark_awaiting_response(&mut self) {
4523 self.context.sent_message_awaiting_response = Some(0);
4526 /// Determines whether we should disconnect the counterparty due to not receiving a response
4527 /// within our expected timeframe.
4529 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4530 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4531 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4532 ticks_elapsed
4533 } else {
4534 // Don't disconnect when we're not waiting on a response.
4535 return false;
4536 };
4537 *ticks_elapsed += 1;
4538 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4541 pub fn shutdown(
4542 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4543 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4545 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4546 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4548 if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
4549 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4550 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4551 // can do that via error message without getting a connection fail anyway...
4552 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4554 for htlc in self.context.pending_inbound_htlcs.iter() {
4555 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4556 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4559 assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
4561 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4562 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4565 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4566 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4567 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4568 }
4569 } else {
4570 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4573 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4574 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4575 // any further commitment updates after we set LocalShutdownSent.
4576 let send_shutdown = (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != ChannelState::LocalShutdownSent as u32;
4578 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4579 Some(_) => false,
4580 None => {
4581 assert!(send_shutdown);
4582 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4583 Ok(scriptpubkey) => scriptpubkey,
4584 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4586 if !shutdown_scriptpubkey.is_compatible(their_features) {
4587 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4589 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4590 true
4591 },
4592 };
4594 // From here on out, we may not fail!
4596 self.context.channel_state |= ChannelState::RemoteShutdownSent as u32;
4597 self.context.update_time_counter += 1;
4599 let monitor_update = if update_shutdown_script {
4600 self.context.latest_monitor_update_id += 1;
4601 let monitor_update = ChannelMonitorUpdate {
4602 update_id: self.context.latest_monitor_update_id,
4603 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4604 scriptpubkey: self.get_closing_scriptpubkey(),
4607 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4608 self.push_ret_blockable_mon_update(monitor_update)
4609 } else { None };
4610 let shutdown = if send_shutdown {
4611 Some(msgs::Shutdown {
4612 channel_id: self.context.channel_id,
4613 scriptpubkey: self.get_closing_scriptpubkey(),
4614 })
4615 } else { None };
4617 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4618 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4619 // cell HTLCs and return them to fail the payment.
4620 self.context.holding_cell_update_fee = None;
4621 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4622 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4623 match htlc_update {
4624 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4625 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4632 self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
4633 self.context.update_time_counter += 1;
4635 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4638 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4639 let mut tx = closing_tx.trust().built_transaction().clone();
4641 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4643 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4644 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4645 let mut holder_sig = sig.serialize_der().to_vec();
4646 holder_sig.push(EcdsaSighashType::All as u8);
4647 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4648 cp_sig.push(EcdsaSighashType::All as u8);
4649 if funding_key[..] < counterparty_funding_key[..] {
4650 tx.input[0].witness.push(holder_sig);
4651 tx.input[0].witness.push(cp_sig);
4653 tx.input[0].witness.push(cp_sig);
4654 tx.input[0].witness.push(holder_sig);
4657 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
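// A minimal sketch of the ordering rule used above (hypothetical helper, not part of the
// real API): per BOLT 3 the witness stack for the 2-of-2 funding input is
// [<empty>, <sig of lexicographically lower pubkey>, <sig of higher pubkey>, <redeemscript>],
// matching the pubkey order in make_funding_redeemscript; the leading empty element
// absorbs OP_CHECKMULTISIG's extra stack pop.
//
//     fn ordered_sigs(our_key: &[u8], our_sig: Vec<u8>, their_key: &[u8], their_sig: Vec<u8>)
//         -> (Vec<u8>, Vec<u8>)
//     {
//         if our_key < their_key { (our_sig, their_sig) } else { (their_sig, our_sig) }
//     }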
4661 pub fn closing_signed<F: Deref>(
4662 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4663 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4664 where F::Target: FeeEstimator
4666 if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != BOTH_SIDES_SHUTDOWN_MASK {
4667 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4669 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
4670 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4672 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4673 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4675 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4676 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4679 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4680 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4683 if self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32 != 0 {
4684 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4685 return Ok((None, None, None));
4688 let funding_redeemscript = self.context.get_funding_redeemscript();
4689 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4690 if used_total_fee != msg.fee_satoshis {
4691 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4693 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4695 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4698 // The remote end may have decided to revoke their output due to inconsistent dust
4699 // limits, so check for that case by re-checking the signature here.
4700 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4701 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4702 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4706 for outp in closing_tx.trust().built_transaction().output.iter() {
4707 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4708 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4712 assert!(self.context.shutdown_scriptpubkey.is_some());
4713 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4714 if last_fee == msg.fee_satoshis {
4715 let shutdown_result = ShutdownResult {
4716 monitor_update: None,
4717 dropped_outbound_htlcs: Vec::new(),
4718 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4719 channel_id: self.context.channel_id,
4720 counterparty_node_id: self.context.counterparty_node_id,
4722 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4723 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4724 self.context.update_time_counter += 1;
4725 return Ok((None, Some(tx), Some(shutdown_result)));
4729 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4731 macro_rules! propose_fee {
4732 ($new_fee: expr) => {
4733 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4734 (closing_tx, $new_fee)
4736 self.build_closing_transaction($new_fee, false)
4739 return match &self.context.holder_signer {
4740 ChannelSignerType::Ecdsa(ecdsa) => {
4742 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4743 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4744 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4745 let shutdown_result = ShutdownResult {
4746 monitor_update: None,
4747 dropped_outbound_htlcs: Vec::new(),
4748 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4749 channel_id: self.context.channel_id,
4750 counterparty_node_id: self.context.counterparty_node_id,
4752 self.context.channel_state = ChannelState::ShutdownComplete as u32;
4753 self.context.update_time_counter += 1;
4754 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4755 (Some(tx), Some(shutdown_result))
4760 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4761 Ok((Some(msgs::ClosingSigned {
4762 channel_id: self.context.channel_id,
4763 fee_satoshis: used_fee,
4765 fee_range: Some(msgs::ClosingSignedFeeRange {
4766 min_fee_satoshis: our_min_fee,
4767 max_fee_satoshis: our_max_fee,
4769 }), signed_tx, shutdown_result))
4771 // TODO (taproot|arik)
4778 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4779 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4780 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4782 if max_fee_satoshis < our_min_fee {
4783 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4785 if min_fee_satoshis > our_max_fee {
4786 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4789 if !self.context.is_outbound() {
4790 // They have to pay, so pick the highest fee in the overlapping range.
4791 // We should never set an upper bound aside from their full balance
4792 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4793 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4795 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4796 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4797 msg.fee_satoshis, our_min_fee, our_max_fee)));
4799 // The proposed fee is in our acceptable range, accept it and broadcast!
4800 propose_fee!(msg.fee_satoshis);
4803 // Old fee style negotiation. We don't bother to enforce whether they are complying
4804 // with the "making progress" requirements, we just comply and hope for the best.
4805 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4806 if msg.fee_satoshis > last_fee {
4807 if msg.fee_satoshis < our_max_fee {
4808 propose_fee!(msg.fee_satoshis);
4809 } else if last_fee < our_max_fee {
4810 propose_fee!(our_max_fee);
4812 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4815 if msg.fee_satoshis > our_min_fee {
4816 propose_fee!(msg.fee_satoshis);
4817 } else if last_fee > our_min_fee {
4818 propose_fee!(our_min_fee);
4820 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4824 if msg.fee_satoshis < our_min_fee {
4825 propose_fee!(our_min_fee);
4826 } else if msg.fee_satoshis > our_max_fee {
4827 propose_fee!(our_max_fee);
4829 propose_fee!(msg.fee_satoshis);
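// Worked example of the negotiation above, with illustrative numbers: say our computed
// limits are our_min_fee = 200 sat and our_max_fee = 1_000 sat, and the peer sends
// fee_satoshis = 900 with fee_range = Some((min: 250, max: 900)).
//  * As the non-funder we propose the highest fee in the overlap,
//    cmp::min(900, 1_000) = 900 sat, and since that equals msg.fee_satoshis we also sign
//    and return the fully-signed closing transaction.
//  * As the funder we simply accept 900 because it falls within [200, 1_000]; a fee
//    outside our advertised range is a protocol violation and closes the channel.
// In the legacy (no fee_range) path we instead converge by clamping each counter-offer
// to [our_min_fee, our_max_fee] until both sides quote the same value.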
4835 fn internal_htlc_satisfies_config(
4836 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4837 ) -> Result<(), (&'static str, u16)> {
4838 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4839 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4840 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4841 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4843 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4844 0x1000 | 12, // fee_insufficient
4847 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4849 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4850 0x1000 | 13, // incorrect_cltv_expiry
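// Worked example of the two checks above, with an illustrative config: with
// forwarding_fee_base_msat = 1_000, forwarding_fee_proportional_millionths = 100 and
// cltv_expiry_delta = 40, forwarding amt_to_forward = 2_000_000 msat requires
//     fee = 2_000_000 * 100 / 1_000_000 + 1_000 = 1_200 msat,
// so the inbound HTLC must carry at least 2_001_200 msat and its cltv_expiry must be at
// least outgoing_cltv_value + 40, else we fail back with fee_insufficient or
// incorrect_cltv_expiry respectively.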
4856 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4857 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4858 /// unsuccessful, falls back to the previous one if one exists.
4859 pub fn htlc_satisfies_config(
4860 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4861 ) -> Result<(), (&'static str, u16)> {
4862 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4864 if let Some(prev_config) = self.context.prev_config() {
4865 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4872 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4873 self.context.cur_holder_commitment_transaction_number + 1
4876 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4877 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32) != 0 { 1 } else { 0 }
4880 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4881 self.context.cur_counterparty_commitment_transaction_number + 2
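// Illustrative values for the getters above: commitment numbers count down from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1, BOLT 3's 48-bit commitment-number space), and the
// cur_* fields track the *next* commitment, hence the "+ 1"/"+ 2" offsets. Immediately
// after the funding handshake cur_holder_commitment_transaction_number is
// INITIAL_COMMITMENT_NUMBER - 1, so get_cur_holder_commitment_transaction_number()
// returns INITIAL_COMMITMENT_NUMBER: the commitment we are currently holding.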
4885 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4886 &self.context.holder_signer
4890 pub fn get_value_stat(&self) -> ChannelValueStat {
4892 value_to_self_msat: self.context.value_to_self_msat,
4893 channel_value_msat: self.context.channel_value_satoshis * 1000,
4894 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
4895 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4896 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
4897 holding_cell_outbound_amount_msat: {
4899 for h in self.context.holding_cell_htlc_updates.iter() {
4901 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
4909 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
4910 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
4914 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
4915 /// Allowed in any state (including after shutdown)
4916 pub fn is_awaiting_monitor_update(&self) -> bool {
4917 (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) != 0
4920 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
4921 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
4922 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
4923 self.context.blocked_monitor_updates[0].update.update_id - 1
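// Example of the bookkeeping above: if updates with update_id 7 and 8 are sitting in
// blocked_monitor_updates, then blocked_monitor_updates[0].update.update_id - 1 == 6 is
// the highest update_id actually released for persistence so far.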
4926 /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
4927 /// further blocked monitor update exists after the next.
4928 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
4929 if self.context.blocked_monitor_updates.is_empty() { return None; }
4930 Some((self.context.blocked_monitor_updates.remove(0).update,
4931 !self.context.blocked_monitor_updates.is_empty()))
4934 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
4935 /// immediately given to the user for persisting or `None` if it should be held as blocked.
4936 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
4937 -> Option<ChannelMonitorUpdate> {
4938 let release_monitor = self.context.blocked_monitor_updates.is_empty();
4939 if !release_monitor {
4940 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4949 pub fn blocked_monitor_updates_pending(&self) -> usize {
4950 self.context.blocked_monitor_updates.len()
4953 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
4954 /// If the channel is outbound, this implies we have not yet broadcasted the funding
4955 /// transaction. If the channel is inbound, this implies simply that the channel has not
4956 /// advanced state.
4957 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
4958 if !self.is_awaiting_monitor_update() { return false; }
4959 if self.context.channel_state &
4960 !(ChannelState::TheirChannelReady as u32 | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32 | ChannelState::WaitingForBatch as u32)
4961 == ChannelState::FundingSent as u32 {
4962 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
4963 // FundingSent set, though our peer could have sent their channel_ready.
4964 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
4967 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
4968 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4969 // If we're a 0-conf channel, we'll move beyond FundingSent immediately even while
4970 // waiting for the initial monitor persistence. Thus, we check if our commitment
4971 // transaction numbers have both been iterated only exactly once (for the
4972 // funding_signed), and we're awaiting monitor update.
4974 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
4975 // only way to get an awaiting-monitor-update state during initial funding is if the
4976 // initial monitor persistence is still pending).
4978 // Because deciding we're awaiting initial broadcast spuriously could result in
4979 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
4980 // we hard-assert here, even in production builds.
4981 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
4982 assert!(self.context.monitor_pending_channel_ready);
4983 assert_eq!(self.context.latest_monitor_update_id, 0);
4989 /// Returns true if our channel_ready has been sent
4990 pub fn is_our_channel_ready(&self) -> bool {
4991 (self.context.channel_state & ChannelState::OurChannelReady as u32) != 0 || self.context.channel_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32
4994 /// Returns true if our peer has either initiated or agreed to shut down the channel.
4995 pub fn received_shutdown(&self) -> bool {
4996 (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) != 0
4999 /// Returns true if we either initiated or agreed to shut down the channel.
5000 pub fn sent_shutdown(&self) -> bool {
5001 (self.context.channel_state & ChannelState::LocalShutdownSent as u32) != 0
5004 /// Returns true if this channel is fully shut down. True here implies that no further actions
5005 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5006 /// will be handled appropriately by the chain monitor.
5007 pub fn is_shutdown(&self) -> bool {
5008 if (self.context.channel_state & ChannelState::ShutdownComplete as u32) == ChannelState::ShutdownComplete as u32 {
5009 assert!(self.context.channel_state == ChannelState::ShutdownComplete as u32);
5014 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5015 self.context.channel_update_status
5018 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5019 self.context.update_time_counter += 1;
5020 self.context.channel_update_status = status;
5023 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5024 // Called:
5025 //  * always when a new block/transactions are confirmed with the new height
5026 // * when funding is signed with a height of 0
5027 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5031 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5032 if funding_tx_confirmations <= 0 {
5033 self.context.funding_tx_confirmation_height = 0;
5036 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
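// Worked example of the confirmation count above: a funding transaction confirmed in the
// current tip has height == funding_tx_confirmation_height and therefore exactly one
// confirmation, so with minimum_depth = 3 we keep waiting until
// height >= funding_tx_confirmation_height + 2. A reorg below the confirmation height
// drives the count to zero or below, and we forget the confirmation height entirely.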
5040 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5041 // channel_ready yet.
5042 if self.context.signer_pending_funding {
5046 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5047 // channel_ready until the entire batch is ready.
5048 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5049 let need_commitment_update = if non_shutdown_state == ChannelState::FundingSent as u32 {
5050 self.context.channel_state |= ChannelState::OurChannelReady as u32;
5052 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::TheirChannelReady as u32) {
5053 self.context.channel_state = ChannelState::ChannelReady as u32 | (self.context.channel_state & MULTI_STATE_FLAGS);
5054 self.context.update_time_counter += 1;
5056 } else if non_shutdown_state == (ChannelState::FundingSent as u32 | ChannelState::OurChannelReady as u32) {
5057 // We got a reorg but not enough to trigger a force close, just ignore.
5060 if self.context.funding_tx_confirmation_height != 0 && self.context.channel_state & !STATE_FLAGS < ChannelState::ChannelReady as u32 {
5061 // We should never see a funding transaction on-chain until we've received
5062 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5063 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5064 // however, may do this and we shouldn't treat it as a bug.
5065 #[cfg(not(fuzzing))]
5066 panic!("Started confirming a channel in a state pre-FundingSent: {}.\n\
5067 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5068 self.context.channel_state);
5070 // We got a reorg but not enough to trigger a force close, just ignore.
5074 if need_commitment_update {
5075 if self.context.channel_state & (ChannelState::MonitorUpdateInProgress as u32) == 0 {
5076 if self.context.channel_state & (ChannelState::PeerDisconnected as u32) == 0 {
5077 let next_per_commitment_point =
5078 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5079 return Some(msgs::ChannelReady {
5080 channel_id: self.context.channel_id,
5081 next_per_commitment_point,
5082 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5086 self.context.monitor_pending_channel_ready = true;
5092 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5093 /// In the first case, we store the confirmation height and calculate the short channel id.
5094 /// In the second, we simply return an Err indicating we need to be force-closed now.
5095 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5096 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5097 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5098 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5100 NS::Target: NodeSigner,
5103 let mut msgs = (None, None);
5104 if let Some(funding_txo) = self.context.get_funding_txo() {
5105 for &(index_in_block, tx) in txdata.iter() {
5106 // Check if the transaction is the expected funding transaction, and if it is,
5107 // check that it pays the right amount to the right script.
5108 if self.context.funding_tx_confirmation_height == 0 {
5109 if tx.txid() == funding_txo.txid {
5110 let txo_idx = funding_txo.index as usize;
5111 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5112 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5113 if self.context.is_outbound() {
5114 // If we generated the funding transaction and it doesn't match what it
5115 // should, the client is really broken and we should just panic and
5116 // tell them off. That said, because hash collisions happen with high
5117 // probability in fuzzing mode, if we're fuzzing we just close the
5118 // channel and move on.
5119 #[cfg(not(fuzzing))]
5120 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5122 self.context.update_time_counter += 1;
5123 let err_reason = "funding tx had wrong script/value or output index";
5124 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5126 if self.context.is_outbound() {
5127 if !tx.is_coin_base() {
5128 for input in tx.input.iter() {
5129 if input.witness.is_empty() {
5130 // We generated a malleable funding transaction, implying we've
5131 // just exposed ourselves to funds loss to our counterparty.
5132 #[cfg(not(fuzzing))]
5133 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5138 self.context.funding_tx_confirmation_height = height;
5139 self.context.funding_tx_confirmed_in = Some(*block_hash);
5140 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5141 Ok(scid) => Some(scid),
5142 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
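// Illustrative scid layout (BOLT 7): scid_from_parts packs the confirmation coordinates
// into a u64 as (block_height << 40) | (tx_index_in_block << 16) | output_index, which is
// why the panic above fires for heights or transaction indices of 2^24 (~16.7 million) or
// more, or output indices of 2^16 (65_536) or more.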
5145 // If this is a coinbase transaction and not a 0-conf channel
5146 // we should update our min_depth to 100 to handle coinbase maturity
5147 if tx.is_coin_base() &&
5148 self.context.minimum_depth.unwrap_or(0) > 0 &&
5149 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5150 self.context.minimum_depth = Some(COINBASE_MATURITY);
5153 // If we allow 1-conf funding, we may need to check for channel_ready here and
5154 // send it immediately instead of waiting for a best_block_updated call (which
5155 // may have already happened for this block).
5156 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5157 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5158 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5159 msgs = (Some(channel_ready), announcement_sigs);
5162 for inp in tx.input.iter() {
5163 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5164 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5165 return Err(ClosureReason::CommitmentTxConfirmed);
5173 /// When a new block is connected, we check the height of the block against outbound holding
5174 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5175 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5176 /// handled by the ChannelMonitor.
5178 /// If we return Err, the channel may have been closed, at which point the standard
5179 /// requirements apply - no calls may be made except those explicitly stated to be allowed.
5182 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5183 /// backwards.
5184 pub fn best_block_updated<NS: Deref, L: Deref>(
5185 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5186 node_signer: &NS, user_config: &UserConfig, logger: &L
5187 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5189 NS::Target: NodeSigner,
5192 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5195 fn do_best_block_updated<NS: Deref, L: Deref>(
5196 &mut self, height: u32, highest_header_time: u32,
5197 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5198 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5200 NS::Target: NodeSigner,
5203 let mut timed_out_htlcs = Vec::new();
5204 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5205 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5207 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5208 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5210 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5211 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5212 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5220 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
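// Worked example of the cutoff above: at height 800_000, with LATENCY_GRACE_PERIOD_BLOCKS
// taken as 3, any holding-cell AddHTLC with cltv_expiry <= 800_003 is dropped and failed
// back here, since a peer applying the same rule would refuse to forward it anyway.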
5222 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5223 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5224 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5226 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5227 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5230 let non_shutdown_state = self.context.channel_state & (!MULTI_STATE_FLAGS);
5231 if non_shutdown_state & !STATE_FLAGS >= ChannelState::ChannelReady as u32 ||
5232 (non_shutdown_state & ChannelState::OurChannelReady as u32) == ChannelState::OurChannelReady as u32 {
5233 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5234 if self.context.funding_tx_confirmation_height == 0 {
5235 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5236 // zero if it has been reorged out, however in either case, our state flags
5237 // indicate we've already sent a channel_ready
5238 funding_tx_confirmations = 0;
5241 // If we've sent channel_ready (or have both sent and received channel_ready), and
5242 // the funding transaction has become unconfirmed,
5243 // close the channel and hope we can get the latest state on chain (because presumably
5244 // the funding transaction is at least still in the mempool of most nodes).
5246 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5247 // 0-conf channel, but not doing so may lead to the
5248 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5250 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5251 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5252 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5253 return Err(ClosureReason::ProcessingError { err: err_reason });
5255 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5256 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5257 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5258 // If funding_tx_confirmed_in is unset, the channel must not be active
5259 assert!(non_shutdown_state & !STATE_FLAGS <= ChannelState::ChannelReady as u32);
5260 assert_eq!(non_shutdown_state & ChannelState::OurChannelReady as u32, 0);
5261 return Err(ClosureReason::FundingTimedOut);
5264 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5265 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5267 Ok((None, timed_out_htlcs, announcement_sigs))
5270 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5271 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5272 /// before the channel has reached channel_ready and we can just wait for more blocks.
5273 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5274 if self.context.funding_tx_confirmation_height != 0 {
5275 // We handle the funding disconnection by calling best_block_updated with a height one
5276 // below where our funding was connected, implying a reorg back to conf_height - 1.
5277 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5278 // We use the time field to bump the current time we set on channel updates if it's
5279 // larger. If we don't know that time has moved forward, we can just set it to the last
5280 // time we saw and it will be ignored.
5281 let best_time = self.context.update_time_counter;
5282 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5283 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5284 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5285 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5286 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5292 // We never learned about the funding confirmation anyway, just ignore
5297 // Methods to get unprompted messages to send to the remote end (or where we already returned
5298 // something in the handler for the message that prompted this message):
5300 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5301 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5302 /// directions). Should be used for both broadcasted announcements and in response to an
5303 /// AnnouncementSignatures message from the remote peer.
5305 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5306 /// closing).
5308 /// This will only return ChannelError::Ignore upon failure.
5310 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5311 fn get_channel_announcement<NS: Deref>(
5312 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5313 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5314 if !self.context.config.announced_channel {
5315 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5317 if !self.context.is_usable() {
5318 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5321 let short_channel_id = self.context.get_short_channel_id()
5322 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5323 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5324 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5325 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5326 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
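// Per BOLT 7, node_id_1 in a channel_announcement must be the lexicographically lesser of
// the two serialized compressed node pubkeys; were_node_one picks which slot our keys
// occupy below, and the bitcoin (funding) keys follow the same node ordering.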
5328 let msg = msgs::UnsignedChannelAnnouncement {
5329 features: channelmanager::provided_channel_features(&user_config),
5332 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5333 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5334 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5335 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5336 excess_data: Vec::new(),
5342 fn get_announcement_sigs<NS: Deref, L: Deref>(
5343 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5344 best_block_height: u32, logger: &L
5345 ) -> Option<msgs::AnnouncementSignatures>
5347 NS::Target: NodeSigner,
5350 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5354 if !self.context.is_usable() {
5358 if self.context.channel_state & ChannelState::PeerDisconnected as u32 != 0 {
5359 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5363 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5367 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5368 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5371 log_trace!(logger, "{:?}", e);
5375 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5377 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5382 match &self.context.holder_signer {
5383 ChannelSignerType::Ecdsa(ecdsa) => {
5384 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5386 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5391 let short_channel_id = match self.context.get_short_channel_id() {
5393 None => return None,
5396 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5398 Some(msgs::AnnouncementSignatures {
5399 channel_id: self.context.channel_id(),
5401 node_signature: our_node_sig,
5402 bitcoin_signature: our_bitcoin_sig,
5405 // TODO (taproot|arik)
5411 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5412 /// available.
5413 fn sign_channel_announcement<NS: Deref>(
5414 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5415 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5416 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5417 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5418 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5419 let were_node_one = announcement.node_id_1 == our_node_key;
5421 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5422 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5423 match &self.context.holder_signer {
5424 ChannelSignerType::Ecdsa(ecdsa) => {
5425 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5426 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5427 Ok(msgs::ChannelAnnouncement {
5428 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5429 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5430 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5431 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5432 contents: announcement,
5435 // TODO (taproot|arik)
5440 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5444 /// Processes an incoming announcement_signatures message, providing a fully-signed
5445 /// channel_announcement message which we can broadcast and storing our counterparty's
5446 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5447 pub fn announcement_signatures<NS: Deref>(
5448 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5449 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5450 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5451 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5453 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5455 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5456 return Err(ChannelError::Close(format!(
5457 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5458 &announcement, self.context.get_counterparty_node_id())));
5460 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5461 return Err(ChannelError::Close(format!(
5462 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5463 &announcement, self.context.counterparty_funding_pubkey())));
5466 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5467 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5468 return Err(ChannelError::Ignore(
5469 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5472 self.sign_channel_announcement(node_signer, announcement)
5475 /// Gets a signed channel_announcement for this channel, if we previously received an
5476 /// announcement_signatures from our counterparty.
5477 pub fn get_signed_channel_announcement<NS: Deref>(
5478 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5479 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5480 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5483 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5485 Err(_) => return None,
5487 match self.sign_channel_announcement(node_signer, announcement) {
5488 Ok(res) => Some(res),
5493 /// May panic if called on a channel that wasn't immediately-previously
5494 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5495 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5496 assert_eq!(self.context.channel_state & ChannelState::PeerDisconnected as u32, ChannelState::PeerDisconnected as u32);
5497 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5498 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5499 // current to_remote balances. However, it no longer has any use, and thus is now simply
5500 // set to a dummy (but valid, as required by the spec) public key.
5501 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5502 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5503 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5504 let mut pk = [2; 33]; pk[1] = 0xff;
5505 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5506 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5507 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5508 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5511 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5514 self.mark_awaiting_response();
5515 msgs::ChannelReestablish {
5516 channel_id: self.context.channel_id(),
5517 // The protocol has two different commitment number concepts - the "commitment
5518 // transaction number", which starts from 0 and counts up, and the "revocation key
5519 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5520 // commitment transaction numbers by the index which will be used to reveal the
5521 // revocation key for that commitment transaction, which means we have to convert them
5522 // to protocol-level commitment numbers here...
5524 // next_local_commitment_number is the next commitment_signed number we expect to
5525 // receive (indicating if they need to resend one that we missed).
5526 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5527 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5528 // receive, however we track it by the next commitment number for a remote transaction
5529 // (which is one further, as they always revoke previous commitment transaction, not
5530 // the one we send) so we have to decrement by 1. Note that if
5531 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5532 // dropped this channel on disconnect as it hasn't yet reached FundingSent so we can't
5534 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5535 your_last_per_commitment_secret: remote_last_secret,
5536 my_current_per_commitment_point: dummy_pubkey,
5537 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5538 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5539 // txid of that interactive transaction, else we MUST NOT set it.
5540 next_funding_txid: None,
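// Worked example of the conversions above: on a channel that has exchanged only the
// initial commitment (both cur_* numbers equal INITIAL_COMMITMENT_NUMBER - 1), we send
// next_local_commitment_number = 1 (we expect commitment_signed number 1 next) and
// next_remote_commitment_number = 0 (we have not yet received a revocation for their
// commitment number 0).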
5545 // Send stuff to our remote peers:
5547 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5548 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5549 /// commitment update.
5551 /// `Err`s will only be [`ChannelError::Ignore`].
5552 pub fn queue_add_htlc<F: Deref, L: Deref>(
5553 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5554 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5555 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5556 ) -> Result<(), ChannelError>
5557 where F::Target: FeeEstimator, L::Target: Logger
5560 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5561 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5562 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5564 if let ChannelError::Ignore(_) = err { /* fine */ }
5565 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5570 /// Adds a pending outbound HTLC to this channel; note that you probably want
5571 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
5573 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5575 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5576 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5578 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5579 /// we may not yet have sent the previous commitment update messages and will need to
5580 /// regenerate them.
5582 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5583 /// on this [`Channel`] if `force_holding_cell` is false.
5585 /// `Err`s will only be [`ChannelError::Ignore`].
5586 fn send_htlc<F: Deref, L: Deref>(
5587 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5588 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5589 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5590 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5591 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5592 where F::Target: FeeEstimator, L::Target: Logger
5594 if (self.context.channel_state & (ChannelState::ChannelReady as u32 | BOTH_SIDES_SHUTDOWN_MASK)) != (ChannelState::ChannelReady as u32) {
5595 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5597 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5598 if amount_msat > channel_total_msat {
5599 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5602 if amount_msat == 0 {
5603 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5606 let available_balances = self.context.get_available_balances(fee_estimator);
5607 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5608 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5609 available_balances.next_outbound_htlc_minimum_msat)));
5612 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5613 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5614 available_balances.next_outbound_htlc_limit_msat)));
5617 if (self.context.channel_state & (ChannelState::PeerDisconnected as u32)) != 0 {
5618 // Note that this should never really happen: being !is_live() on receipt of an
5619 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5620 // the user to send directly into a !is_live() channel. However, if we
5621 // disconnected during the time the previous hop was doing the commitment dance we may
5622 // end up getting here after the forwarding delay. In any case, returning an
5623 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5624 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5627 let need_holding_cell = (self.context.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) != 0;
5628 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5629 payment_hash, amount_msat,
5630 if force_holding_cell { "into holding cell" }
5631 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5632 else { "to peer" });
5634 if need_holding_cell {
5635 force_holding_cell = true;
5638 // Now update local state:
5639 if force_holding_cell {
5640 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5645 onion_routing_packet,
5652 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5653 htlc_id: self.context.next_holder_htlc_id,
5655 payment_hash: payment_hash.clone(),
5657 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5663 let res = msgs::UpdateAddHTLC {
5664 channel_id: self.context.channel_id,
5665 htlc_id: self.context.next_holder_htlc_id,
5669 onion_routing_packet,
5673 self.context.next_holder_htlc_id += 1;
5678 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5679 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5680 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
5681 // fail to generate this, we are still at least at a position where upgrading their status
5682 // is acceptable.
5683 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5684 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5685 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5687 if let Some(state) = new_state {
5688 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5692 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5693 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5694 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5695 // Grab the preimage, if it exists, instead of cloning
5696 let mut reason = OutboundHTLCOutcome::Success(None);
5697 mem::swap(outcome, &mut reason);
5698 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5701 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5702 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5703 debug_assert!(!self.context.is_outbound());
5704 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5705 self.context.feerate_per_kw = feerate;
5706 self.context.pending_update_fee = None;
5709 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5711 let (mut htlcs_ref, counterparty_commitment_tx) =
5712 self.build_commitment_no_state_update(logger);
5713 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5714 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5715 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5717 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5718 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5721 self.context.latest_monitor_update_id += 1;
5722 let monitor_update = ChannelMonitorUpdate {
5723 update_id: self.context.latest_monitor_update_id,
5724 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5725 commitment_txid: counterparty_commitment_txid,
5726 htlc_outputs: htlcs.clone(),
5727 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5728 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5729 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5730 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5731 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5734 self.context.channel_state |= ChannelState::AwaitingRemoteRevoke as u32;
5738 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5739 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5740 where L::Target: Logger
5742 let counterparty_keys = self.context.build_remote_transaction_keys();
5743 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5744 let counterparty_commitment_tx = commitment_stats.tx;
5746 #[cfg(any(test, fuzzing))]
5748 if !self.context.is_outbound() {
5749 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5750 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5751 if let Some(info) = projected_commit_tx_info {
5752 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5753 if info.total_pending_htlcs == total_pending_htlcs
5754 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5755 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5756 && info.feerate == self.context.feerate_per_kw {
5757 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5758 assert_eq!(actual_fee, info.fee);
5764 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5767 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5768 /// generation when we shouldn't change HTLC/channel state.
5769 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5770 // Get the fee tests from `build_commitment_no_state_update`
5771 #[cfg(any(test, fuzzing))]
5772 self.build_commitment_no_state_update(logger);
5774 let counterparty_keys = self.context.build_remote_transaction_keys();
5775 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5776 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5778 match &self.context.holder_signer {
5779 ChannelSignerType::Ecdsa(ecdsa) => {
5780 let (signature, htlc_signatures);
5783 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5784 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5788 let res = ecdsa.sign_counterparty_commitment(
5789 &commitment_stats.tx,
5790 commitment_stats.inbound_htlc_preimages,
5791 commitment_stats.outbound_htlc_preimages,
5792 &self.context.secp_ctx,
5793 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5795 htlc_signatures = res.1;
5797 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5798 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5799 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5800 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5802 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5803 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5804 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5805 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5806 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5807 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5811 Ok((msgs::CommitmentSigned {
5812 channel_id: self.context.channel_id,
5816 partial_signature_with_nonce: None,
5817 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5819 // TODO (taproot|arik)
5825 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5826 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5828 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5829 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5830 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5831 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5832 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5833 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5834 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5835 where F::Target: FeeEstimator, L::Target: Logger
{
    let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
        onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
    if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
    match send_res? {
        Some(_) => {
            let monitor_update = self.build_commitment_no_status_check(logger);
            self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
            Ok(self.push_ret_blockable_mon_update(monitor_update))
        },
        None => Ok(None)
    }
}
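// A hedged caller-side sketch (names like `channel`, `fee_est` and `logger` are
// assumptions, not definitions from this file): `Ok(None)` means the HTLC went
// into the holding cell (or the blockable update was withheld), while
// `Ok(Some(update))` hands back a `ChannelMonitorUpdate` to persist:
//
//   match channel.send_htlc_and_commit(amt_msat, payment_hash, cltv_expiry,
//           source, onion_packet, None, &fee_est, &logger)? {
//       Some(monitor_update) => { /* persist before releasing messages */ },
//       None => { /* nothing to persist yet */ },
//   }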
/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
/// happened.
pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
    let new_forwarding_info = Some(CounterpartyForwardingInfo {
        fee_base_msat: msg.contents.fee_base_msat,
        fee_proportional_millionths: msg.contents.fee_proportional_millionths,
        cltv_expiry_delta: msg.contents.cltv_expiry_delta
    });
    let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
    if did_change {
        self.context.counterparty_forwarding_info = new_forwarding_info;
    }

    Ok(did_change)
}
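// Illustrative only: because the comparison is against the previously stored
// `CounterpartyForwardingInfo`, re-applying an identical update reports no change:
//
//   let changed = chan.channel_update(&update)?; // true on first application
//   let changed = chan.channel_update(&update)?; // false thereafter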
/// Begins the shutdown process, getting a message for the remote peer and returning all
/// holding cell HTLCs for payment failure.
///
/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
/// [`ChannelMonitorUpdate`] will be returned.
pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
    target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
{
    for htlc in self.context.pending_outbound_htlcs.iter() {
        if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
            return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
        }
    }
    if self.context.channel_state & BOTH_SIDES_SHUTDOWN_MASK != 0 {
        if (self.context.channel_state & ChannelState::LocalShutdownSent as u32) == ChannelState::LocalShutdownSent as u32 {
            return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
        }
        else if (self.context.channel_state & ChannelState::RemoteShutdownSent as u32) == ChannelState::RemoteShutdownSent as u32 {
            return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
        }
    }
    if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
        return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
    }
    assert_eq!(self.context.channel_state & ChannelState::ShutdownComplete as u32, 0);
    if self.context.channel_state & (ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateInProgress as u32) != 0 {
        return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
    }

    // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
    // script is set, we just force-close and call it a day.
    let mut chan_closed = false;
    if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
        chan_closed = true;
    }

    let update_shutdown_script = match self.context.shutdown_scriptpubkey {
        Some(_) => false,
        None if !chan_closed => {
            // use override shutdown script if provided
            let shutdown_scriptpubkey = match override_shutdown_script {
                Some(script) => script,
                None => {
                    // otherwise, use the shutdown scriptpubkey provided by the signer
                    match signer_provider.get_shutdown_scriptpubkey() {
                        Ok(scriptpubkey) => scriptpubkey,
                        Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
                    }
                },
            };
            if !shutdown_scriptpubkey.is_compatible(their_features) {
                return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
            }
            self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
            true
        },
        None => false,
    };

    // From here on out, we may not fail!
    self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
    let shutdown_result = if self.context.channel_state & !STATE_FLAGS < ChannelState::FundingSent as u32 {
        let shutdown_result = ShutdownResult {
            monitor_update: None,
            dropped_outbound_htlcs: Vec::new(),
            unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
            channel_id: self.context.channel_id,
            counterparty_node_id: self.context.counterparty_node_id,
        };
        self.context.channel_state = ChannelState::ShutdownComplete as u32;
        Some(shutdown_result)
    } else {
        self.context.channel_state |= ChannelState::LocalShutdownSent as u32;
        None
    };
    self.context.update_time_counter += 1;

    let monitor_update = if update_shutdown_script {
        self.context.latest_monitor_update_id += 1;
        let monitor_update = ChannelMonitorUpdate {
            update_id: self.context.latest_monitor_update_id,
            updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
                scriptpubkey: self.get_closing_scriptpubkey(),
            }],
        };
        self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
        self.push_ret_blockable_mon_update(monitor_update)
    } else { None };
    let shutdown = msgs::Shutdown {
        channel_id: self.context.channel_id,
        scriptpubkey: self.get_closing_scriptpubkey(),
    };

    // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
    // our shutdown until we've committed all of the pending changes.
    self.context.holding_cell_update_fee = None;
    let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
    self.context.holding_cell_htlc_updates.retain(|htlc_update| {
        match htlc_update {
            &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
                dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
                false
            },
            _ => true
        }
    });

    debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
        "we can't both complete shutdown and return a monitor update");

    Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
}
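// The guards above are plain bit tests over `channel_state`. A hedged sketch of
// the idea (the discriminant values are defined by `ChannelState`, not here):
//
//   let state = ChannelState::LocalShutdownSent as u32;
//   assert!(state & BOTH_SIDES_SHUTDOWN_MASK != 0); // a shutdown is already in flight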
pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
    self.context.holding_cell_htlc_updates.iter()
        .flat_map(|htlc_update| {
            match htlc_update {
                HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
                    => Some((source, payment_hash)),
                _ => None,
            }
        })
        .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
}
}
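// Hypothetical caller: counting every HTLC we may still be liable for, i.e.
// holding-cell adds chained with already-committed outbound HTLCs:
//
//   let in_flight = channel.inflight_htlc_sources().count();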
5993 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
5994 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
5995 pub context: ChannelContext<SP>,
pub unfunded_context: UnfundedChannelContext,
}
5999 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6000 pub fn new<ES: Deref, F: Deref>(
6001 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6002 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6003 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6004 ) -> Result<OutboundV1Channel<SP>, APIError>
where ES::Target: EntropySource,
      F::Target: FeeEstimator
{
6008 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6009 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6010 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6011 let pubkeys = holder_signer.pubkeys().clone();
if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
    return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
}
if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
    return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
}
let channel_value_msat = channel_value_satoshis * 1000;
if push_msat > channel_value_msat {
    return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
}
if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
    return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
}
let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
    // Protocol-level safety check in place, although it should never happen because
    // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
    return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
}
6033 let channel_type = Self::get_initial_channel_type(&config, their_features);
6034 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
    (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
} else {
    (ConfirmationTarget::NonAnchorChannelFee, 0)
};
let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
    return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
}
6049 let mut secp_ctx = Secp256k1::new();
6050 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
    match signer_provider.get_shutdown_scriptpubkey() {
        Ok(scriptpubkey) => Some(scriptpubkey),
        Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
    }
} else { None };

if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
    if !shutdown_scriptpubkey.is_compatible(&their_features) {
        return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
    }
}

let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
    Ok(script) => script,
    Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
};

let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));

Ok(Self {
    context: ChannelContext {
        user_id,
6076 config: LegacyChannelConfig {
6077 options: config.channel_config.clone(),
6078 announced_channel: config.channel_handshake_config.announced_channel,
commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
},

prev_config: None,

inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

channel_id: temporary_channel_id,
temporary_channel_id: Some(temporary_channel_id),
channel_state: ChannelState::OurInitSent as u32,
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,
channel_value_satoshis,

latest_monitor_update_id: 0,

holder_signer: ChannelSignerType::Ecdsa(holder_signer),
shutdown_scriptpubkey,
destination_script,

cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
value_to_self_msat,
6103 pending_inbound_htlcs: Vec::new(),
6104 pending_outbound_htlcs: Vec::new(),
6105 holding_cell_htlc_updates: Vec::new(),
6106 pending_update_fee: None,
6107 holding_cell_update_fee: None,
6108 next_holder_htlc_id: 0,
6109 next_counterparty_htlc_id: 0,
6110 update_time_counter: 1,
6112 resend_order: RAACommitmentOrder::CommitmentFirst,
6114 monitor_pending_channel_ready: false,
6115 monitor_pending_revoke_and_ack: false,
6116 monitor_pending_commitment_signed: false,
6117 monitor_pending_forwards: Vec::new(),
6118 monitor_pending_failures: Vec::new(),
6119 monitor_pending_finalized_fulfills: Vec::new(),
6121 signer_pending_commitment_update: false,
6122 signer_pending_funding: false,
6124 #[cfg(debug_assertions)]
6125 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6126 #[cfg(debug_assertions)]
6127 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6129 last_sent_closing_fee: None,
6130 pending_counterparty_closing_signed: None,
6131 expecting_peer_commitment_signed: false,
6132 closing_fee_limits: None,
6133 target_closing_feerate_sats_per_kw: None,
6135 funding_tx_confirmed_in: None,
6136 funding_tx_confirmation_height: 0,
6137 short_channel_id: None,
6138 channel_creation_height: current_chain_height,
6140 feerate_per_kw: commitment_feerate,
6141 counterparty_dust_limit_satoshis: 0,
6142 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6143 counterparty_max_htlc_value_in_flight_msat: 0,
6144 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6145 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6146 holder_selected_channel_reserve_satoshis,
6147 counterparty_htlc_minimum_msat: 0,
6148 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6149 counterparty_max_accepted_htlcs: 0,
6150 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6151 minimum_depth: None, // Filled in in accept_channel
6153 counterparty_forwarding_info: None,
6155 channel_transaction_parameters: ChannelTransactionParameters {
6156 holder_pubkeys: pubkeys,
6157 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6158 is_outbound_from_holder: true,
6159 counterparty_parameters: None,
6160 funding_outpoint: None,
6161 channel_type_features: channel_type.clone()
6163 funding_transaction: None,
6164 is_batch_funding: None,
6166 counterparty_cur_commitment_point: None,
6167 counterparty_prev_commitment_point: None,
6168 counterparty_node_id,
6170 counterparty_shutdown_scriptpubkey: None,
6172 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6174 channel_update_status: ChannelUpdateStatus::Enabled,
6175 closing_signed_in_flight: false,
6177 announcement_sigs: None,
6179 #[cfg(any(test, fuzzing))]
6180 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6181 #[cfg(any(test, fuzzing))]
6182 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6184 workaround_lnd_bug_4006: None,
6185 sent_message_awaiting_response: None,
6187 latest_inbound_scid_alias: None,
6188 outbound_scid_alias,
6190 channel_pending_event_emitted: false,
6191 channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
historical_inbound_htlc_fulfills: HashSet::new(),

channel_type,
channel_keys_id,

blocked_monitor_updates: Vec::new(),
},
unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
})
}
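// A hedged construction sketch (every argument is assumed to be built elsewhere:
// fee estimator, entropy source, signer provider, peer id/features, config):
//
//   let chan = OutboundV1Channel::new(&fee_est, &entropy, &signer_provider,
//       their_node_id, &their_features, 1_000_000 /* sats */, 0 /* push_msat */,
//       42 /* user_id */, &config, best_height, 0 /* outbound_scid_alias */, None)?;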
6205 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6206 /// a funding_created message for the remote peer.
6207 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6208 /// or if called on an inbound channel.
6209 /// Note that channel_id changes during this call!
6210 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6211 /// If an Err is returned, it is a ChannelError::Close.
6212 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6213 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
if !self.context.is_outbound() {
    panic!("Tried to create outbound funding_created message on an inbound channel!");
}
if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
    panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
}
if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
        self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
        self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
    panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
6226 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6227 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6229 // Now that we're past error-generating stuff, update our local state:
6231 self.context.channel_state = ChannelState::FundingCreated as u32;
6232 self.context.channel_id = funding_txo.to_channel_id();
6234 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6235 // We can skip this if it is a zero-conf channel.
if funding_transaction.is_coin_base() &&
        self.context.minimum_depth.unwrap_or(0) > 0 &&
        self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
    self.context.minimum_depth = Some(COINBASE_MATURITY);
}
6242 self.context.funding_transaction = Some(funding_transaction);
6243 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
let funding_created = self.context.get_funding_created_msg(logger);
if funding_created.is_none() {
    if !self.context.signer_pending_funding {
        log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
        self.context.signer_pending_funding = true;
    }
}

let channel = Channel {
    context: self.context,
};

Ok((channel, funding_created))
}
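// Caller-side ordering sketch (illustrative, matching the doc warning above):
//
//   let (chan, funding_created_opt) = chan.get_funding_created(tx, funding_txo, false, &logger)
//       .map_err(|(_, e)| e)?;
//   // send `funding_created_opt` (if ready), await `funding_signed`, and only
//   // then broadcast the funding transaction.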
6260 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6261 // The default channel type (ie the first one we try) depends on whether the channel is
6262 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6263 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6264 // with no other changes, and fall back to `only_static_remotekey`.
6265 let mut ret = ChannelTypeFeatures::only_static_remote_key();
if !config.channel_handshake_config.announced_channel &&
        config.channel_handshake_config.negotiate_scid_privacy &&
        their_features.supports_scid_privacy() {
    ret.set_scid_privacy_required();
}
6272 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6273 // set it now. If they don't understand it, we'll fall back to our default of
6274 // `only_static_remotekey`.
if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
        their_features.supports_anchors_zero_fee_htlc_tx() {
    ret.set_anchors_zero_fee_htlc_tx_required();
}

ret
}
6283 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6284 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6285 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6286 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6287 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
) -> Result<msgs::OpenChannel, ()>
where
    F::Target: FeeEstimator
{
    if !self.context.is_outbound() || self.context.channel_state != ChannelState::OurInitSent as u32 { return Err(()); }
    if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
        // We've exhausted our options
        return Err(());
    }
    // We support opening a few different types of channels. Try removing our additional
    // features one by one until we've either arrived at our default or the counterparty has
    // accepted something.
6301 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6302 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6303 // checks whether the counterparty supports every feature, this would only happen if the
    // counterparty is advertising the feature, but rejecting channels proposing the feature for
    // some reason.
    if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
        self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
        self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
        assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
    } else if self.context.channel_type.supports_scid_privacy() {
        self.context.channel_type.clear_scid_privacy();
    } else {
        self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
    }
    self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
    Ok(self.get_open_channel(chain_hash))
}
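// Hedged retry sketch (caller names are assumptions): each call strips one
// optional feature until we are back at `only_static_remote_key`:
//
//   if let Ok(retry_open_channel) = chan.maybe_handle_error_without_close(chain_hash, &fee_est) {
//       // re-send `open_channel` proposing the next-simplest channel type
//   } else {
//       // already at the default type; the channel open has truly failed
//   }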
6319 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
if !self.context.is_outbound() {
    panic!("Tried to open a channel for an inbound channel?");
}
if self.context.channel_state != ChannelState::OurInitSent as u32 {
    panic!("Cannot generate an open_channel after we've moved forward");
}
if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
    panic!("Tried to send an open_channel for a channel that has already advanced");
}

let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
let keys = self.context.get_holder_pubkeys();

msgs::OpenChannel {
    chain_hash,
    temporary_channel_id: self.context.channel_id,
6337 funding_satoshis: self.context.channel_value_satoshis,
6338 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6339 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6340 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6341 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6342 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6343 feerate_per_kw: self.context.feerate_per_kw as u32,
6344 to_self_delay: self.context.get_holder_selected_contest_delay(),
6345 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6346 funding_pubkey: keys.funding_pubkey,
6347 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6348 payment_point: keys.payment_point,
6349 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6350 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6351 first_per_commitment_point,
6352 channel_flags: if self.context.config.announced_channel {1} else {0},
6353 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6354 Some(script) => script.clone().into_inner(),
    None => Builder::new().into_script(),
}),
channel_type: Some(self.context.channel_type.clone()),
}
}
6362 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6363 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6365 // Check sanity of message fields:
if !self.context.is_outbound() {
    return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
}
if self.context.channel_state != ChannelState::OurInitSent as u32 {
    return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
}
if msg.dust_limit_satoshis > 21000000 * 100000000 {
    return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
}
if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
    return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
}
if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
    return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
}
if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
    return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
        msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
}
let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
if msg.htlc_minimum_msat >= full_channel_value_msat {
    return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
}
let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.to_self_delay > max_delay_acceptable {
    return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
}
if msg.max_accepted_htlcs < 1 {
    return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
if msg.max_accepted_htlcs > MAX_HTLCS {
    return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
}

// Now check against optional parameters as set by config...
if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
    return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
}
if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
    return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
}
if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
    return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
}
if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
    return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
}
if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
    return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
    return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}
if msg.minimum_depth > peer_limits.max_minimum_depth {
    return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
}
if let Some(ty) = &msg.channel_type {
    if *ty != self.context.channel_type {
        return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
    }
} else if their_features.supports_channel_type() {
    // Assume they've accepted the channel type as they said they understand it.
} else {
    let channel_type = ChannelTypeFeatures::from_init(&their_features);
    if channel_type != ChannelTypeFeatures::only_static_remote_key() {
        return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
    }
    self.context.channel_type = channel_type.clone();
    self.context.channel_transaction_parameters.channel_type_features = channel_type;
}

let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
    match &msg.shutdown_scriptpubkey {
        &Some(ref script) => {
            // Peer is signaling upfront_shutdown and has opted out with a zero-length script. We don't enforce anything.
            if script.len() == 0 {
                None
            } else {
                if !script::is_bolt2_compliant(&script, their_features) {
                    return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
                }
                Some(script.clone())
            }
        },
        // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a zero-length script). The peer looks buggy, so we fail the channel.
        &None => {
            return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
        }
    }
} else { None };
6458 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6459 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6460 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6461 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6462 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
if peer_limits.trust_own_funding_0conf {
    self.context.minimum_depth = Some(msg.minimum_depth);
} else {
    self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
}

let counterparty_pubkeys = ChannelPublicKeys {
    funding_pubkey: msg.funding_pubkey,
    revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
    payment_point: msg.payment_point,
    delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
    htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
};

self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
    selected_contest_delay: msg.to_self_delay,
    pubkeys: counterparty_pubkeys,
});

self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

self.context.channel_state = ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32;
self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

Ok(())
}
}
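// Handshake ordering, as a hedged sketch from the opener's side:
//
//   let open_msg = chan.get_open_channel(chain_hash);      // 1. we propose
//   // ... peer replies with accept_channel ...
//   chan.accept_channel(&their_accept, &default_limits, &their_features)?; // 2. we validate
//   // 3. proceed to get_funding_created once the funding tx is built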
6493 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6494 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6495 pub context: ChannelContext<SP>,
pub unfunded_context: UnfundedChannelContext,
}
6499 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6500 /// Creates a new channel from a remote sides' request for one.
6501 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6502 pub fn new<ES: Deref, F: Deref, L: Deref>(
6503 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6504 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6505 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6506 current_chain_height: u32, logger: &L, is_0conf: bool,
6507 ) -> Result<InboundV1Channel<SP>, ChannelError>
6508 where ES::Target: EntropySource,
      F::Target: FeeEstimator,
      L::Target: Logger,
{
    let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
    let announced_channel = (msg.channel_flags & 1) == 1;

    // First check the channel type is known, failing before we do anything else if we don't
    // support this channel type.
    let channel_type = if let Some(channel_type) = &msg.channel_type {
        if channel_type.supports_any_optional_bits() {
            return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
        }
6522 // We only support the channel types defined by the `ChannelManager` in
6523 // `provided_channel_type_features`. The channel type must always support
6524 // `static_remote_key`.
if !channel_type.requires_static_remote_key() {
    return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
}
// Make sure we support all of the features behind the channel type.
if !channel_type.is_subset(our_supported_features) {
    return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
}
if channel_type.requires_scid_privacy() && announced_channel {
    return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
}
channel_type.clone()
} else {
    let channel_type = ChannelTypeFeatures::from_init(&their_features);
    if channel_type != ChannelTypeFeatures::only_static_remote_key() {
        return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
    }
    channel_type
};
6544 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6545 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6546 let pubkeys = holder_signer.pubkeys().clone();
6547 let counterparty_pubkeys = ChannelPublicKeys {
6548 funding_pubkey: msg.funding_pubkey,
6549 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6550 payment_point: msg.payment_point,
6551 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
    htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
};

if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
    return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
}
6559 // Check sanity of message fields:
if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
    return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
}
if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
    return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
}
if msg.channel_reserve_satoshis > msg.funding_satoshis {
    return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
}
let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
if msg.push_msat > full_channel_value_msat {
    return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
}
if msg.dust_limit_satoshis > msg.funding_satoshis {
    return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
}
if msg.htlc_minimum_msat >= full_channel_value_msat {
    return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
}
Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.to_self_delay > max_counterparty_selected_contest_delay {
    return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
}
if msg.max_accepted_htlcs < 1 {
    return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
if msg.max_accepted_htlcs > MAX_HTLCS {
    return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
}
6592 // Now check against optional parameters as set by config...
if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
    return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
}
if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
    return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
}
if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
    return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
}
if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
    return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
}
if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
    return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
}
if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
    return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
    return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
}

// Convert things into internal flags and prep our state:

if config.channel_handshake_limits.force_announced_channel_preference {
    if config.channel_handshake_config.announced_channel != announced_channel {
        return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
    }
}
let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
    // Protocol-level safety check in place, although it should never happen because
    // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
    return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
    return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
}
if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
    log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
        msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
}
if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
    return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
}

// Check if the funder's amount for the initial commitment tx is sufficient
// for full fee payment plus a few HTLCs to ensure the channel will be useful.
let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
    ANCHOR_OUTPUT_VALUE_SATOSHI * 2
} else {
    0
};
let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
    return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
}

let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
// While it's reasonable for us to not meet the channel reserve initially (if they don't
// want to push much to us), our counterparty should always have more than our reserve.
if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
    return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
}
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
    match &msg.shutdown_scriptpubkey {
        &Some(ref script) => {
            // Peer is signaling upfront_shutdown and has opted out with a zero-length script. We don't enforce anything.
            if script.len() == 0 {
                None
            } else {
                if !script::is_bolt2_compliant(&script, their_features) {
                    return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
                }
                Some(script.clone())
            }
        },
        // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a zero-length script). The peer looks buggy, so we fail the channel.
        &None => {
            return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
        }
    }
} else { None };

let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
    match signer_provider.get_shutdown_scriptpubkey() {
        Ok(scriptpubkey) => Some(scriptpubkey),
        Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
    }
} else { None };

if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
    if !shutdown_scriptpubkey.is_compatible(&their_features) {
        return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
    }
}

let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
    Ok(script) => script,
    Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
};
6698 let mut secp_ctx = Secp256k1::new();
6699 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
let minimum_depth = if is_0conf {
    Some(0)
} else {
    Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
};

let chan = Self {
    context: ChannelContext {
        user_id,
config: LegacyChannelConfig {
    options: config.channel_config.clone(),
    announced_channel,
    commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
},

prev_config: None,

inbound_handshake_limits_override: None,
6721 temporary_channel_id: Some(msg.temporary_channel_id),
6722 channel_id: msg.temporary_channel_id,
6723 channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
announcement_sigs_state: AnnouncementSigsState::NotSent,
secp_ctx,

latest_monitor_update_id: 0,
6729 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
shutdown_scriptpubkey,
destination_script,
6733 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6734 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6735 value_to_self_msat: msg.push_msat,
6737 pending_inbound_htlcs: Vec::new(),
6738 pending_outbound_htlcs: Vec::new(),
6739 holding_cell_htlc_updates: Vec::new(),
6740 pending_update_fee: None,
6741 holding_cell_update_fee: None,
6742 next_holder_htlc_id: 0,
6743 next_counterparty_htlc_id: 0,
6744 update_time_counter: 1,
6746 resend_order: RAACommitmentOrder::CommitmentFirst,
6748 monitor_pending_channel_ready: false,
6749 monitor_pending_revoke_and_ack: false,
6750 monitor_pending_commitment_signed: false,
6751 monitor_pending_forwards: Vec::new(),
6752 monitor_pending_failures: Vec::new(),
6753 monitor_pending_finalized_fulfills: Vec::new(),
6755 signer_pending_commitment_update: false,
6756 signer_pending_funding: false,
6758 #[cfg(debug_assertions)]
6759 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6760 #[cfg(debug_assertions)]
6761 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
6763 last_sent_closing_fee: None,
6764 pending_counterparty_closing_signed: None,
6765 expecting_peer_commitment_signed: false,
6766 closing_fee_limits: None,
6767 target_closing_feerate_sats_per_kw: None,
6769 funding_tx_confirmed_in: None,
6770 funding_tx_confirmation_height: 0,
6771 short_channel_id: None,
6772 channel_creation_height: current_chain_height,
6774 feerate_per_kw: msg.feerate_per_kw,
6775 channel_value_satoshis: msg.funding_satoshis,
6776 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
6777 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6778 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
6779 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
6780 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
6781 holder_selected_channel_reserve_satoshis,
6782 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
6783 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6784 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
minimum_depth,
6788 counterparty_forwarding_info: None,
6790 channel_transaction_parameters: ChannelTransactionParameters {
6791 holder_pubkeys: pubkeys,
6792 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6793 is_outbound_from_holder: false,
6794 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
6795 selected_contest_delay: msg.to_self_delay,
    pubkeys: counterparty_pubkeys,
}),
6798 funding_outpoint: None,
6799 channel_type_features: channel_type.clone()
6801 funding_transaction: None,
6802 is_batch_funding: None,
6804 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
6805 counterparty_prev_commitment_point: None,
6806 counterparty_node_id,
6808 counterparty_shutdown_scriptpubkey,
6810 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6812 channel_update_status: ChannelUpdateStatus::Enabled,
6813 closing_signed_in_flight: false,
6815 announcement_sigs: None,
6817 #[cfg(any(test, fuzzing))]
6818 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6819 #[cfg(any(test, fuzzing))]
6820 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6822 workaround_lnd_bug_4006: None,
6823 sent_message_awaiting_response: None,
6825 latest_inbound_scid_alias: None,
6826 outbound_scid_alias: 0,
6828 channel_pending_event_emitted: false,
6829 channel_ready_event_emitted: false,
#[cfg(any(test, fuzzing))]
historical_inbound_htlc_fulfills: HashSet::new(),

channel_type,
channel_keys_id,

blocked_monitor_updates: Vec::new(),
},
unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
};

Ok(chan)
}
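// A hedged construction sketch for the inbound path; `open_channel_msg` and the
// surrounding handles are assumptions for illustration:
//
//   let chan = InboundV1Channel::new(&fee_est, &entropy, &signer_provider,
//       their_node_id, &our_supported_features, &their_features, &open_channel_msg,
//       42 /* user_id */, &config, best_height, &logger, /* is_0conf */ false)?;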
6845 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
6846 /// should be sent back to the counterparty node.
6848 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6849 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
if self.context.is_outbound() {
    panic!("Tried to send accept_channel for an outbound channel?");
}
if self.context.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
    panic!("Tried to send accept_channel after channel had moved forward");
}
if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
    panic!("Tried to send an accept_channel for a channel that has already advanced");
}

self.generate_accept_channel_message()
}
6863 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
6864 /// inbound channel. If the intention is to accept an inbound channel, use
6865 /// [`InboundV1Channel::accept_inbound_channel`] instead.
6867 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
6868 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
6869 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6870 let keys = self.context.get_holder_pubkeys();
6872 msgs::AcceptChannel {
6873 temporary_channel_id: self.context.channel_id,
6874 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6875 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6876 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6877 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6878 minimum_depth: self.context.minimum_depth.unwrap(),
6879 to_self_delay: self.context.get_holder_selected_contest_delay(),
6880 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6881 funding_pubkey: keys.funding_pubkey,
6882 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6883 payment_point: keys.payment_point,
6884 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6885 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6886 first_per_commitment_point,
6887 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6888 Some(script) => script.clone().into_inner(),
    None => Builder::new().into_script(),
}),
channel_type: Some(self.context.channel_type.clone()),
#[cfg(taproot)]
next_local_nonce: None,
}
}
6897 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
6898 /// inbound channel without accepting it.
6900 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
#[cfg(test)]
pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
    self.generate_accept_channel_message()
}
6906 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
6907 let funding_script = self.context.get_funding_redeemscript();
6909 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6910 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
6911 let trusted_tx = initial_commitment_tx.trust();
6912 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6913 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6914 // They sign the holder commitment transaction...
6915 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
6916 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
6917 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
6918 encode::serialize_hex(&funding_script), &self.context.channel_id());
6919 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
Ok(initial_commitment_tx)
}

pub fn funding_created<L: Deref>(
    mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
where
    L::Target: Logger
{
    if self.context.is_outbound() {
        return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
    }
    if self.context.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
        // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
        // remember the channel, so it's safe to just send an error_message here and drop the
        // channel.
        return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
    }
    if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
            self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
            self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
        panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
    }
6945 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
6946 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6947 // This is an externally observable change before we finish all our checks. In particular
6948 // check_funding_created_signature may fail.
6949 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
    Ok(res) => res,
    Err(ChannelError::Close(e)) => {
        self.context.channel_transaction_parameters.funding_outpoint = None;
        return Err((self, ChannelError::Close(e)));
    },
    Err(e) => {
        // The only error we know how to handle is ChannelError::Close, so we fall over here
        // to make sure we don't continue with an inconsistent state.
        panic!("unexpected error type from check_funding_created_signature {:?}", e);
    }
};

let holder_commitment_tx = HolderCommitmentTransaction::new(
    initial_commitment_tx,
    msg.signature,
    Vec::new(),
    &self.context.get_holder_pubkeys().funding_pubkey,
    self.context.counterparty_funding_pubkey()
);

if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
    return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
}
6976 // Now that we're past error-generating stuff, update our local state:
6978 self.context.channel_state = ChannelState::FundingSent as u32;
6979 self.context.channel_id = funding_txo.to_channel_id();
6980 self.context.cur_counterparty_commitment_transaction_number -= 1;
6981 self.context.cur_holder_commitment_transaction_number -= 1;
6983 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
6985 let funding_redeemscript = self.context.get_funding_redeemscript();
6986 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6987 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6988 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6989 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6990 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6991 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6992 shutdown_script, self.context.get_holder_selected_contest_delay(),
6993 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
6994 &self.context.channel_transaction_parameters,
6995 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6997 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6998 channel_monitor.provide_initial_counterparty_commitment_tx(
6999 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7000 self.context.cur_counterparty_commitment_transaction_number + 1,
7001 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7002 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7003 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7005 log_info!(logger, "{} funding_signed for peer for channel {}",
7006 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7008 // Promote the channel to a full-fledged one now that we have updated the state and have a
7009 // `ChannelMonitor`.
7010 let mut channel = Channel {
7011 context: self.context,
7013 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7014 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7016 Ok((channel, funding_signed, channel_monitor))
7020 const SERIALIZATION_VERSION: u8 = 3;
7021 const MIN_SERIALIZATION_VERSION: u8 = 3;
7023 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7029 impl Writeable for ChannelUpdateStatus {
7030 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7031 // We only care about writing out the current state as it was announced, i.e. only either
7032 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7033 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7035 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7036 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7037 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7038 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7044 impl Readable for ChannelUpdateStatus {
7045 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7046 Ok(match <u8 as Readable>::read(reader)? {
7047 0 => ChannelUpdateStatus::Enabled,
7048 1 => ChannelUpdateStatus::Disabled,
7049 _ => return Err(DecodeError::InvalidValue),
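// A hedged illustration of the round-trip above: staged states collapse to the value we
// last announced, e.g.
//   DisabledStaged(_) --write--> 0 --read--> Enabled
//   EnabledStaged(_)  --write--> 1 --read--> Disabled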
7054 impl Writeable for AnnouncementSigsState {
7055 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7056 // We only care about writing out the current state as if we had just disconnected, at
7057 // which point we always set anything but PeerReceived to NotSent.
7059 AnnouncementSigsState::NotSent => 0u8.write(writer),
7060 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7061 AnnouncementSigsState::Committed => 0u8.write(writer),
7062 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7067 impl Readable for AnnouncementSigsState {
7068 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7069 Ok(match <u8 as Readable>::read(reader)? {
7070 0 => AnnouncementSigsState::NotSent,
7071 1 => AnnouncementSigsState::PeerReceived,
7072 _ => return Err(DecodeError::InvalidValue),
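// Likewise, everything except PeerReceived serializes as 0 and reads back as NotSent
// (e.g. MessageSent -> 0 -> NotSent), matching the reset-on-reconnect behavior noted above.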
7077 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7078 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7079 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7082 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7084 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7085 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7086 // the low bytes now and the optional high bytes later.
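// A sketch of the split: for user_id = ((hi as u128) << 64) | (lo as u128),
//   `user_id as u64` yields lo (written here), and
//   `(user_id >> 64) as u64` yields hi (written as an optional TLV below).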
7087 let user_id_low = self.context.user_id as u64;
7088 user_id_low.write(writer)?;
7090 // Version 1 deserializers expected to read parts of the config object here. Version 2
7091 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7092 // `minimum_depth` we simply write dummy values here.
7093 writer.write_all(&[0; 8])?;
7095 self.context.channel_id.write(writer)?;
7096 (self.context.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
7097 self.context.channel_value_satoshis.write(writer)?;
7099 self.context.latest_monitor_update_id.write(writer)?;
7101 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7102 // deserialized from that format.
7103 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7104 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7105 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7107 self.context.destination_script.write(writer)?;
7109 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7110 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7111 self.context.value_to_self_msat.write(writer)?;
7113 let mut dropped_inbound_htlcs = 0;
7114 for htlc in self.context.pending_inbound_htlcs.iter() {
7115 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7116 dropped_inbound_htlcs += 1;
7119 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7120 for htlc in self.context.pending_inbound_htlcs.iter() {
7121 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7124 htlc.htlc_id.write(writer)?;
7125 htlc.amount_msat.write(writer)?;
7126 htlc.cltv_expiry.write(writer)?;
7127 htlc.payment_hash.write(writer)?;
7129 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7130 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7132 htlc_state.write(writer)?;
7134 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7136 htlc_state.write(writer)?;
7138 &InboundHTLCState::Committed => {
7141 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7143 removal_reason.write(writer)?;
7148 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7149 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7150 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7152 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7153 for htlc in self.context.pending_outbound_htlcs.iter() {
7154 htlc.htlc_id.write(writer)?;
7155 htlc.amount_msat.write(writer)?;
7156 htlc.cltv_expiry.write(writer)?;
7157 htlc.payment_hash.write(writer)?;
7158 htlc.source.write(writer)?;
7160 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7162 onion_packet.write(writer)?;
7164 &OutboundHTLCState::Committed => {
7167 &OutboundHTLCState::RemoteRemoved(_) => {
7168 // Treat this as a Committed because we haven't received the CS - they'll
7169 // re-send the claim/fail on reconnect along with (hopefully) the missing CS.
7172 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7174 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7175 preimages.push(preimage);
7177 let reason: Option<&HTLCFailReason> = outcome.into();
7178 reason.write(writer)?;
7180 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7182 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7183 preimages.push(preimage);
7185 let reason: Option<&HTLCFailReason> = outcome.into();
7186 reason.write(writer)?;
7189 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7190 pending_outbound_blinding_points.push(htlc.blinding_point);
7193 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7194 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7195 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7196 for update in self.context.holding_cell_htlc_updates.iter() {
7198 &HTLCUpdateAwaitingACK::AddHTLC {
7199 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7200 blinding_point, skimmed_fee_msat,
7203 amount_msat.write(writer)?;
7204 cltv_expiry.write(writer)?;
7205 payment_hash.write(writer)?;
7206 source.write(writer)?;
7207 onion_routing_packet.write(writer)?;
7209 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7210 holding_cell_blinding_points.push(blinding_point);
7212 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7214 payment_preimage.write(writer)?;
7215 htlc_id.write(writer)?;
7217 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7219 htlc_id.write(writer)?;
7220 err_packet.write(writer)?;
7225 match self.context.resend_order {
7226 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7227 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7230 self.context.monitor_pending_channel_ready.write(writer)?;
7231 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7232 self.context.monitor_pending_commitment_signed.write(writer)?;
7234 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7235 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7236 pending_forward.write(writer)?;
7237 htlc_id.write(writer)?;
7240 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7241 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7242 htlc_source.write(writer)?;
7243 payment_hash.write(writer)?;
7244 fail_reason.write(writer)?;
7247 if self.context.is_outbound() {
7248 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7249 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7250 Some(feerate).write(writer)?;
7252 // As for inbound HTLCs, if the update was only announced and never committed in a
7253 // commitment_signed, drop it.
7254 None::<u32>.write(writer)?;
7256 self.context.holding_cell_update_fee.write(writer)?;
7258 self.context.next_holder_htlc_id.write(writer)?;
7259 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7260 self.context.update_time_counter.write(writer)?;
7261 self.context.feerate_per_kw.write(writer)?;
7263 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7264 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7265 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7266 // consider the stale state on reload.
7269 self.context.funding_tx_confirmed_in.write(writer)?;
7270 self.context.funding_tx_confirmation_height.write(writer)?;
7271 self.context.short_channel_id.write(writer)?;
7273 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7274 self.context.holder_dust_limit_satoshis.write(writer)?;
7275 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7277 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7278 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7280 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7281 self.context.holder_htlc_minimum_msat.write(writer)?;
7282 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7284 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7285 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7287 match &self.context.counterparty_forwarding_info {
7290 info.fee_base_msat.write(writer)?;
7291 info.fee_proportional_millionths.write(writer)?;
7292 info.cltv_expiry_delta.write(writer)?;
7294 None => 0u8.write(writer)?
7297 self.context.channel_transaction_parameters.write(writer)?;
7298 self.context.funding_transaction.write(writer)?;
7300 self.context.counterparty_cur_commitment_point.write(writer)?;
7301 self.context.counterparty_prev_commitment_point.write(writer)?;
7302 self.context.counterparty_node_id.write(writer)?;
7304 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7306 self.context.commitment_secrets.write(writer)?;
7308 self.context.channel_update_status.write(writer)?;
7310 #[cfg(any(test, fuzzing))]
7311 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7312 #[cfg(any(test, fuzzing))]
7313 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7314 htlc.write(writer)?;
7317 // If the channel type is something other than only-static-remote-key, then we need to have
7318 // older clients fail to deserialize this channel at all. If the type is
7319 // only-static-remote-key, we simply consider it "default" and don't write the channel type out at all.
7321 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7322 Some(&self.context.channel_type) } else { None };
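// (Writing the type at even TLV type 2 below is what enforces this: readers that don't
// understand an even TLV type must fail the deserialization, so e.g. a channel using a
// newer type is rejected outright by older clients rather than silently misread.)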
7324 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7325 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be a
7326 // different percentage of the channel value than 10%, which older versions of LDK used
7327 // to set it to before the percentage was made configurable.
7328 let serialized_holder_selected_reserve =
7329 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7330 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7332 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7333 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7334 let serialized_holder_htlc_max_in_flight =
7335 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7336 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
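// A worked sketch of the comparison above, assuming the helper computes
// channel_value_satoshis * 1000 * percent / 100: for a 1_000_000 sat channel the legacy
// 10% default is 100_000_000 msat, so only a value differing from that is serialized.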
7338 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7339 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7341 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7342 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7343 // we write the high bytes as an option here.
7344 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7346 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7348 write_tlv_fields!(writer, {
7349 (0, self.context.announcement_sigs, option),
7350 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7351 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7352 // them twice, once with their original default values above, and once as an option
7353 // here. On the read side, old versions will simply ignore the odd-type entries here,
7354 // and new versions map the default values to None and allow the TLV entries here to override that.
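// (The odd types 1 and 3 used for the duplicated fields matter here: per the TLV rules,
// readers skip unknown odd types, so old versions ignore these entries while new versions
// prefer them over the legacy defaults written earlier.)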
7356 (1, self.context.minimum_depth, option),
7357 (2, chan_type, option),
7358 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7359 (4, serialized_holder_selected_reserve, option),
7360 (5, self.context.config, required),
7361 (6, serialized_holder_htlc_max_in_flight, option),
7362 (7, self.context.shutdown_scriptpubkey, option),
7363 (8, self.context.blocked_monitor_updates, optional_vec),
7364 (9, self.context.target_closing_feerate_sats_per_kw, option),
7365 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7366 (13, self.context.channel_creation_height, required),
7367 (15, preimages, required_vec),
7368 (17, self.context.announcement_sigs_state, required),
7369 (19, self.context.latest_inbound_scid_alias, option),
7370 (21, self.context.outbound_scid_alias, required),
7371 (23, channel_ready_event_emitted, option),
7372 (25, user_id_high_opt, option),
7373 (27, self.context.channel_keys_id, required),
7374 (28, holder_max_accepted_htlcs, option),
7375 (29, self.context.temporary_channel_id, option),
7376 (31, channel_pending_event_emitted, option),
7377 (35, pending_outbound_skimmed_fees, optional_vec),
7378 (37, holding_cell_skimmed_fees, optional_vec),
7379 (38, self.context.is_batch_funding, option),
7380 (39, pending_outbound_blinding_points, optional_vec),
7381 (41, holding_cell_blinding_points, optional_vec),
7388 const MAX_ALLOC_SIZE: usize = 64*1024;
7389 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7391 ES::Target: EntropySource,
7392 SP::Target: SignerProvider
7394 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7395 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7396 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7398 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7399 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7400 // the low bytes now and the high bytes later.
7401 let user_id_low: u64 = Readable::read(reader)?;
7403 let mut config = Some(LegacyChannelConfig::default());
7405 // Read the old serialization of the ChannelConfig from version 0.0.98.
7406 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7407 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7408 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7409 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7411 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7412 let mut _val: u64 = Readable::read(reader)?;
7415 let channel_id = Readable::read(reader)?;
7416 let channel_state = Readable::read(reader)?;
7417 let channel_value_satoshis = Readable::read(reader)?;
7419 let latest_monitor_update_id = Readable::read(reader)?;
7421 let mut keys_data = None;
7423 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7424 // the `channel_keys_id` TLV is present below.
7425 let keys_len: u32 = Readable::read(reader)?;
7426 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7427 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7428 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7429 let mut data = [0; 1024];
7430 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7431 reader.read_exact(read_slice)?;
7432 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
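// A note on the bound: even a corrupted keys_len of u32::MAX only pre-allocates
// min(keys_len, MAX_ALLOC_SIZE) = 64KiB here, and the chunked read_exact calls will hit
// an UnexpectedEof io::Error on truncated input long before gigabytes are ever allocated.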
7436 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7437 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7438 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7441 let destination_script = Readable::read(reader)?;
7443 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7444 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7445 let value_to_self_msat = Readable::read(reader)?;
7447 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7449 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7450 for _ in 0..pending_inbound_htlc_count {
7451 pending_inbound_htlcs.push(InboundHTLCOutput {
7452 htlc_id: Readable::read(reader)?,
7453 amount_msat: Readable::read(reader)?,
7454 cltv_expiry: Readable::read(reader)?,
7455 payment_hash: Readable::read(reader)?,
7456 state: match <u8 as Readable>::read(reader)? {
7457 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7458 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7459 3 => InboundHTLCState::Committed,
7460 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7461 _ => return Err(DecodeError::InvalidValue),
7466 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7467 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7468 for _ in 0..pending_outbound_htlc_count {
7469 pending_outbound_htlcs.push(OutboundHTLCOutput {
7470 htlc_id: Readable::read(reader)?,
7471 amount_msat: Readable::read(reader)?,
7472 cltv_expiry: Readable::read(reader)?,
7473 payment_hash: Readable::read(reader)?,
7474 source: Readable::read(reader)?,
7475 state: match <u8 as Readable>::read(reader)? {
7476 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7477 1 => OutboundHTLCState::Committed,
7479 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7480 OutboundHTLCState::RemoteRemoved(option.into())
7483 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7484 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7487 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7488 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7490 _ => return Err(DecodeError::InvalidValue),
7492 skimmed_fee_msat: None,
7493 blinding_point: None,
7497 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7498 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7499 for _ in 0..holding_cell_htlc_update_count {
7500 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7501 0 => HTLCUpdateAwaitingACK::AddHTLC {
7502 amount_msat: Readable::read(reader)?,
7503 cltv_expiry: Readable::read(reader)?,
7504 payment_hash: Readable::read(reader)?,
7505 source: Readable::read(reader)?,
7506 onion_routing_packet: Readable::read(reader)?,
7507 skimmed_fee_msat: None,
7508 blinding_point: None,
7510 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7511 payment_preimage: Readable::read(reader)?,
7512 htlc_id: Readable::read(reader)?,
7514 2 => HTLCUpdateAwaitingACK::FailHTLC {
7515 htlc_id: Readable::read(reader)?,
7516 err_packet: Readable::read(reader)?,
7518 _ => return Err(DecodeError::InvalidValue),
7522 let resend_order = match <u8 as Readable>::read(reader)? {
7523 0 => RAACommitmentOrder::CommitmentFirst,
7524 1 => RAACommitmentOrder::RevokeAndACKFirst,
7525 _ => return Err(DecodeError::InvalidValue),
7528 let monitor_pending_channel_ready = Readable::read(reader)?;
7529 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7530 let monitor_pending_commitment_signed = Readable::read(reader)?;
7532 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7533 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7534 for _ in 0..monitor_pending_forwards_count {
7535 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7538 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7539 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7540 for _ in 0..monitor_pending_failures_count {
7541 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7544 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7546 let holding_cell_update_fee = Readable::read(reader)?;
7548 let next_holder_htlc_id = Readable::read(reader)?;
7549 let next_counterparty_htlc_id = Readable::read(reader)?;
7550 let update_time_counter = Readable::read(reader)?;
7551 let feerate_per_kw = Readable::read(reader)?;
7553 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7554 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7555 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7556 // consider the stale state on reload.
7557 match <u8 as Readable>::read(reader)? {
7560 let _: u32 = Readable::read(reader)?;
7561 let _: u64 = Readable::read(reader)?;
7562 let _: Signature = Readable::read(reader)?;
7564 _ => return Err(DecodeError::InvalidValue),
7567 let funding_tx_confirmed_in = Readable::read(reader)?;
7568 let funding_tx_confirmation_height = Readable::read(reader)?;
7569 let short_channel_id = Readable::read(reader)?;
7571 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7572 let holder_dust_limit_satoshis = Readable::read(reader)?;
7573 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7574 let mut counterparty_selected_channel_reserve_satoshis = None;
7576 // Read the old serialization from version 0.0.98.
7577 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7579 // Read the 8 bytes of backwards-compatibility data.
7580 let _dummy: u64 = Readable::read(reader)?;
7582 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7583 let holder_htlc_minimum_msat = Readable::read(reader)?;
7584 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7586 let mut minimum_depth = None;
7588 // Read the old serialization from version 0.0.98.
7589 minimum_depth = Some(Readable::read(reader)?);
7591 // Read the 4 bytes of backwards-compatibility data.
7592 let _dummy: u32 = Readable::read(reader)?;
7595 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7597 1 => Some(CounterpartyForwardingInfo {
7598 fee_base_msat: Readable::read(reader)?,
7599 fee_proportional_millionths: Readable::read(reader)?,
7600 cltv_expiry_delta: Readable::read(reader)?,
7602 _ => return Err(DecodeError::InvalidValue),
7605 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7606 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7608 let counterparty_cur_commitment_point = Readable::read(reader)?;
7610 let counterparty_prev_commitment_point = Readable::read(reader)?;
7611 let counterparty_node_id = Readable::read(reader)?;
7613 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7614 let commitment_secrets = Readable::read(reader)?;
7616 let channel_update_status = Readable::read(reader)?;
7618 #[cfg(any(test, fuzzing))]
7619 let mut historical_inbound_htlc_fulfills = HashSet::new();
7620 #[cfg(any(test, fuzzing))]
7622 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7623 for _ in 0..htlc_fulfills_len {
7624 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7628 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7629 Some((feerate, if channel_parameters.is_outbound_from_holder {
7630 FeeUpdateState::Outbound
7632 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7638 let mut announcement_sigs = None;
7639 let mut target_closing_feerate_sats_per_kw = None;
7640 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7641 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7642 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7643 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7644 // only, so we default to that if none was written.
7645 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7646 let mut channel_creation_height = Some(serialized_height);
7647 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7649 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7650 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7651 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7652 let mut latest_inbound_scid_alias = None;
7653 let mut outbound_scid_alias = None;
7654 let mut channel_pending_event_emitted = None;
7655 let mut channel_ready_event_emitted = None;
7657 let mut user_id_high_opt: Option<u64> = None;
7658 let mut channel_keys_id: Option<[u8; 32]> = None;
7659 let mut temporary_channel_id: Option<ChannelId> = None;
7660 let mut holder_max_accepted_htlcs: Option<u16> = None;
7662 let mut blocked_monitor_updates = Some(Vec::new());
7664 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7665 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7667 let mut is_batch_funding: Option<()> = None;
7669 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7670 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7672 read_tlv_fields!(reader, {
7673 (0, announcement_sigs, option),
7674 (1, minimum_depth, option),
7675 (2, channel_type, option),
7676 (3, counterparty_selected_channel_reserve_satoshis, option),
7677 (4, holder_selected_channel_reserve_satoshis, option),
7678 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7679 (6, holder_max_htlc_value_in_flight_msat, option),
7680 (7, shutdown_scriptpubkey, option),
7681 (8, blocked_monitor_updates, optional_vec),
7682 (9, target_closing_feerate_sats_per_kw, option),
7683 (11, monitor_pending_finalized_fulfills, optional_vec),
7684 (13, channel_creation_height, option),
7685 (15, preimages_opt, optional_vec),
7686 (17, announcement_sigs_state, option),
7687 (19, latest_inbound_scid_alias, option),
7688 (21, outbound_scid_alias, option),
7689 (23, channel_ready_event_emitted, option),
7690 (25, user_id_high_opt, option),
7691 (27, channel_keys_id, option),
7692 (28, holder_max_accepted_htlcs, option),
7693 (29, temporary_channel_id, option),
7694 (31, channel_pending_event_emitted, option),
7695 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7696 (37, holding_cell_skimmed_fees_opt, optional_vec),
7697 (38, is_batch_funding, option),
7698 (39, pending_outbound_blinding_points_opt, optional_vec),
7699 (41, holding_cell_blinding_points_opt, optional_vec),
7702 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7703 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7704 // If we've gotten to the funding stage of the channel, populate the signer with its
7705 // required channel parameters.
7706 let non_shutdown_state = channel_state & (!MULTI_STATE_FLAGS);
7707 if non_shutdown_state & !STATE_FLAGS >= (ChannelState::FundingCreated as u32) {
7708 holder_signer.provide_channel_parameters(&channel_parameters);
7710 (channel_keys_id, holder_signer)
7712 // `keys_data` can be `None` if we had corrupted data.
7713 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7714 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7715 (holder_signer.channel_keys_id(), holder_signer)
7718 if let Some(preimages) = preimages_opt {
7719 let mut iter = preimages.into_iter();
7720 for htlc in pending_outbound_htlcs.iter_mut() {
7722 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7723 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7725 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7726 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7731 // We expect all preimages to be consumed above
7732 if iter.next().is_some() {
7733 return Err(DecodeError::InvalidValue);
7737 let chan_features = channel_type.as_ref().unwrap();
7738 if !chan_features.is_subset(our_supported_features) {
7739 // If the channel was written by a new version and negotiated with features we don't
7740 // understand yet, refuse to read it.
7741 return Err(DecodeError::UnknownRequiredFeature);
7744 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7745 // To account for that, we're proactively setting/overriding the field here.
7746 channel_parameters.channel_type_features = chan_features.clone();
7748 let mut secp_ctx = Secp256k1::new();
7749 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7751 // `user_id` used to be a single u64 value. In order to remain backwards
7752 // compatible with versions prior to 0.0.113, the u128 is serialized as two
7753 // separate u64 values.
7754 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
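// e.g. low = 0x0123_4567_89ab_cdef with high = 0x1 recombines to 0x1_0123_4567_89ab_cdef;
// pre-0.0.113 writers never wrote the high half, so `unwrap_or(0)` reproduces their u64 id.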
7756 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
7758 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
7759 let mut iter = skimmed_fees.into_iter();
7760 for htlc in pending_outbound_htlcs.iter_mut() {
7761 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7763 // We expect all skimmed fees to be consumed above
7764 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7766 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
7767 let mut iter = skimmed_fees.into_iter();
7768 for htlc in holding_cell_htlc_updates.iter_mut() {
7769 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
7770 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
7773 // We expect all skimmed fees to be consumed above
7774 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7776 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
7777 let mut iter = blinding_pts.into_iter();
7778 for htlc in pending_outbound_htlcs.iter_mut() {
7779 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7781 // We expect all blinding points to be consumed above
7782 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
7784 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
7785 let mut iter = blinding_pts.into_iter();
7786 for htlc in holding_cell_htlc_updates.iter_mut() {
7787 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
7788 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
7791 // We expect all blinding points to be consumed above
7792 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
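// Note the pattern shared by the four blocks above: each optional TLV vec was written with
// exactly one entry per pending outbound HTLC (or per holding-cell AddHTLC), so on read we
// zip the entries back in order and treat any length mismatch as DecodeError::InvalidValue.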
7796 context: ChannelContext {
7799 config: config.unwrap(),
7803 // Note that we don't care about serializing handshake limits as we only ever serialize
7804 // channel data after the handshake has completed.
7805 inbound_handshake_limits_override: None,
7808 temporary_channel_id,
7810 announcement_sigs_state: announcement_sigs_state.unwrap(),
7812 channel_value_satoshis,
7814 latest_monitor_update_id,
7816 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7817 shutdown_scriptpubkey,
7820 cur_holder_commitment_transaction_number,
7821 cur_counterparty_commitment_transaction_number,
7824 holder_max_accepted_htlcs,
7825 pending_inbound_htlcs,
7826 pending_outbound_htlcs,
7827 holding_cell_htlc_updates,
7831 monitor_pending_channel_ready,
7832 monitor_pending_revoke_and_ack,
7833 monitor_pending_commitment_signed,
7834 monitor_pending_forwards,
7835 monitor_pending_failures,
7836 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
7838 signer_pending_commitment_update: false,
7839 signer_pending_funding: false,
7842 holding_cell_update_fee,
7843 next_holder_htlc_id,
7844 next_counterparty_htlc_id,
7845 update_time_counter,
7848 #[cfg(debug_assertions)]
7849 holder_max_commitment_tx_output: Mutex::new((0, 0)),
7850 #[cfg(debug_assertions)]
7851 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
7853 last_sent_closing_fee: None,
7854 pending_counterparty_closing_signed: None,
7855 expecting_peer_commitment_signed: false,
7856 closing_fee_limits: None,
7857 target_closing_feerate_sats_per_kw,
7859 funding_tx_confirmed_in,
7860 funding_tx_confirmation_height,
7862 channel_creation_height: channel_creation_height.unwrap(),
7864 counterparty_dust_limit_satoshis,
7865 holder_dust_limit_satoshis,
7866 counterparty_max_htlc_value_in_flight_msat,
7867 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
7868 counterparty_selected_channel_reserve_satoshis,
7869 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
7870 counterparty_htlc_minimum_msat,
7871 holder_htlc_minimum_msat,
7872 counterparty_max_accepted_htlcs,
7875 counterparty_forwarding_info,
7877 channel_transaction_parameters: channel_parameters,
7878 funding_transaction,
7881 counterparty_cur_commitment_point,
7882 counterparty_prev_commitment_point,
7883 counterparty_node_id,
7885 counterparty_shutdown_scriptpubkey,
7889 channel_update_status,
7890 closing_signed_in_flight: false,
7894 #[cfg(any(test, fuzzing))]
7895 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7896 #[cfg(any(test, fuzzing))]
7897 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7899 workaround_lnd_bug_4006: None,
7900 sent_message_awaiting_response: None,
7902 latest_inbound_scid_alias,
7903 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
7904 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
7906 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
7907 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
7909 #[cfg(any(test, fuzzing))]
7910 historical_inbound_htlc_fulfills,
7912 channel_type: channel_type.unwrap(),
7915 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
7924 use bitcoin::blockdata::constants::ChainHash;
7925 use bitcoin::blockdata::script::{ScriptBuf, Builder};
7926 use bitcoin::blockdata::transaction::{Transaction, TxOut};
7927 use bitcoin::blockdata::opcodes;
7928 use bitcoin::network::constants::Network;
7929 use crate::ln::{PaymentHash, PaymentPreimage};
7930 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
7931 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
7932 use crate::ln::channel::InitFeatures;
7933 use crate::ln::channel::{Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
7934 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
7935 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
7936 use crate::ln::msgs;
7937 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
7938 use crate::ln::script::ShutdownScript;
7939 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
7940 use crate::chain::BestBlock;
7941 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
7942 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
7943 use crate::chain::transaction::OutPoint;
7944 use crate::routing::router::{Path, RouteHop};
7945 use crate::util::config::UserConfig;
7946 use crate::util::errors::APIError;
7947 use crate::util::ser::{ReadableArgs, Writeable};
7948 use crate::util::test_utils;
7949 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
7950 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
7951 use bitcoin::secp256k1::ffi::Signature as FFISignature;
7952 use bitcoin::secp256k1::{SecretKey,PublicKey};
7953 use bitcoin::hashes::sha256::Hash as Sha256;
7954 use bitcoin::hashes::Hash;
7955 use bitcoin::hashes::hex::FromHex;
7956 use bitcoin::hash_types::WPubkeyHash;
7957 use bitcoin::blockdata::locktime::absolute::LockTime;
7958 use bitcoin::address::{WitnessProgram, WitnessVersion};
7959 use crate::prelude::*;
7961 struct TestFeeEstimator {
7964 impl FeeEstimator for TestFeeEstimator {
7965 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
7971 fn test_max_funding_satoshis_no_wumbo() {
7972 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
7973 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
7974 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
7978 signer: InMemorySigner,
7981 impl EntropySource for Keys {
7982 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
7985 impl SignerProvider for Keys {
7986 type EcdsaSigner = InMemorySigner;
7988 type TaprootSigner = InMemorySigner;
7990 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
7991 self.signer.channel_keys_id()
7994 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
7998 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8000 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8001 let secp_ctx = Secp256k1::signing_only();
8002 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8003 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8004 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8007 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8008 let secp_ctx = Secp256k1::signing_only();
8009 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8010 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8014 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8015 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8016 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8020 fn upfront_shutdown_script_incompatibility() {
8021 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8022 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8023 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8026 let seed = [42; 32];
8027 let network = Network::Testnet;
8028 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8029 keys_provider.expect(OnGetShutdownScriptpubkey {
8030 returns: non_v0_segwit_shutdown_script.clone(),
8033 let secp_ctx = Secp256k1::new();
8034 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8035 let config = UserConfig::default();
8036 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8037 Err(APIError::IncompatibleShutdownScript { script }) => {
8038 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8040 Err(e) => panic!("Unexpected error: {:?}", e),
8041 Ok(_) => panic!("Expected error"),
8045 // Check that, during channel creation, we use the same feerate in the open channel message
8046 // as we do in the Channel object creation itself.
8048 fn test_open_channel_msg_fee() {
8049 let original_fee = 253;
8050 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8051 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8052 let secp_ctx = Secp256k1::new();
8053 let seed = [42; 32];
8054 let network = Network::Testnet;
8055 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8057 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8058 let config = UserConfig::default();
8059 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8061 // Now change the fee so we can check that the fee in the open_channel message is the
8062 // same as the old fee.
8063 fee_est.fee_est = 500;
8064 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8065 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8069 fn test_holder_vs_counterparty_dust_limit() {
8070 // Test that when calculating the local and remote commitment transaction fees, the correct
8071 // dust limits are used.
8072 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8073 let secp_ctx = Secp256k1::new();
8074 let seed = [42; 32];
8075 let network = Network::Testnet;
8076 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8077 let logger = test_utils::TestLogger::new();
8078 let best_block = BestBlock::from_network(network);
8080 // Go through the flow of opening a channel between two nodes, making sure
8081 // they have different dust limits.
8083 // Create Node A's channel pointing to Node B's pubkey
8084 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8085 let config = UserConfig::default();
8086 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8088 // Create Node B's channel by receiving Node A's open_channel message
8089 // Make sure A's dust limit is as we expect.
8090 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8091 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8092 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8094 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8095 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8096 accept_channel_msg.dust_limit_satoshis = 546;
8097 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8098 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8100 // Node A --> Node B: funding created
8101 let output_script = node_a_chan.context.get_funding_redeemscript();
8102 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8103 value: 10000000, script_pubkey: output_script.clone(),
8105 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8106 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8107 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8109 // Node B --> Node A: funding signed
8110 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8112 // Put some inbound and outbound HTLCs in A's channel.
8113 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8114 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8116 amount_msat: htlc_amount_msat,
8117 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8118 cltv_expiry: 300000000,
8119 state: InboundHTLCState::Committed,
8122 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8124 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8125 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8126 cltv_expiry: 200000000,
8127 state: OutboundHTLCState::Committed,
8128 source: HTLCSource::OutboundRoute {
8129 path: Path { hops: Vec::new(), blinded_tail: None },
8130 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8131 first_hop_htlc_msat: 548,
8132 payment_id: PaymentId([42; 32]),
8134 skimmed_fee_msat: None,
8135 blinding_point: None,
8138 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8139 // the dust limit check.
8140 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8141 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8142 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8143 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8145 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8146 // of the HTLCs are seen to be above the dust limit.
8147 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8148 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8149 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8150 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8151 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8155 fn test_timeout_vs_success_htlc_dust_limit() {
8156 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8157 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8158 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8159 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
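// A worked sketch, assuming the non-anchor BOLT 3 weights (timeout tx 663 WU, success tx
// 703 WU): at 253 sat/kW an offered HTLC adds 253 * 663 / 1000 = 167 sat of timeout-tx
// fee, so it only counts as non-dust once it exceeds holder_dust_limit_satoshis + 167 sat.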
8160 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8161 let secp_ctx = Secp256k1::new();
8162 let seed = [42; 32];
8163 let network = Network::Testnet;
8164 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8166 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8167 let config = UserConfig::default();
8168 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8170 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8171 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8173 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8174 // counted as dust when it shouldn't be.
8175 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8176 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8177 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8178 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8180 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8181 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8182 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8183 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8184 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8186 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8188 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8189 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8190 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8191 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8192 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8194 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8195 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8196 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8197 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8198 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

#[test]
fn channel_reestablish_no_updates() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Go through the flow of opening a channel between two nodes.

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel
	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();

	// Now disconnect the two nodes and check that the commitment point in
	// Node B's channel_reestablish message is sane.
	assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_b_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

	// Check that the commitment point in Node A's channel_reestablish message
	// is sane.
	assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
	let msg = node_a_chan.get_channel_reestablish(&&logger);
	assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
	assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
	assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
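
	// For reference (per BOLT 2): next_local_commitment_number == 1 means the
	// next commitment_signed we expect covers commitment number 1, i.e. only the
	// initial commitment has been exchanged, and next_remote_commitment_number
	// == 0 means no revocation has been received yet, which is why
	// your_last_per_commitment_secret is still all zeroes.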
}

#[test]
fn test_configured_holder_max_htlc_value_in_flight() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut config_2_percent = UserConfig::default();
	config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
	let mut config_99_percent = UserConfig::default();
	config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
	let mut config_0_percent = UserConfig::default();
	config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
	let mut config_101_percent = UserConfig::default();
	config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

	// Test that `OutboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// which is set to the lower bound + 1 (2%) of the `channel_value`.
	let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
	let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
	assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
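
	// Worked example (illustrative): the channel is 10,000,000 sat, i.e.
	// 10,000,000,000 msat, so the 2% configuration above yields a
	// holder_max_htlc_value_in_flight_msat of 200,000,000 msat.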

	// Test with the upper bound - 1 of valid values (99%).
	let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
	let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
	assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

	let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

	// Test that `InboundV1Channel::new` creates a channel with the correct value for
	// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
	// which is set to the lower bound + 1 (2%) of the `channel_value`.
	let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
	assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

	// Test with the upper bound - 1 of valid values (99%).
	let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
	assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

	// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
	let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
	assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

	// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
	// than 100.
	let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
	let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
	assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
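
	// Out-of-range configurations are clamped rather than rejected: 0% is raised
	// to the 1% floor (100,000,000 msat here) and 101% is capped at 100%, i.e.
	// the full 10,000,000,000 msat channel value.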

	// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
	// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
	let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
	assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

	// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
	// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
	// than 100.
	let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
	let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
	assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}

#[test]
fn test_configured_holder_selected_channel_reserve_satoshis() {

	// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
	// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
	test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

	// Test with valid but unreasonably high channel reserves
	// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
	test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
	test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

	// Test with calculated channel reserve less than lower bound
	// i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
	test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
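
	// Worked example (illustrative): 100_000 sat * 0.00002 = 2 sat, which is
	// below the MIN_THEIR_CHAN_RESERVE_SATOSHIS floor (1_000 sat at the time of
	// writing), so the helper below clamps the expected reserve up to that floor.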

	// Test with invalid channel reserves since sum of both is greater than or equal
	// to channel_value
	test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
	test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
}

fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
	let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

	let mut outbound_node_config = UserConfig::default();
	outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
	let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

	let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
	assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

	let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
	let mut inbound_node_config = UserConfig::default();
	inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

	if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
		let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

		let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

		assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
		assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
	} else {
		// Channel Negotiations failed
		let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
		assert!(result.is_err());
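
		// A reserve pair summing to 100% or more of the channel value would leave
		// no spendable balance on either side, so the inbound side rejects the
		// open_channel outright.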
	}
}

#[test]
fn channel_update() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	// Create Node A's channel pointing to Node B's pubkey
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	// Create Node B's channel by receiving Node A's open_channel message
	// Make sure A's dust limit is as we expect.
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

	// Node B --> Node A: accept channel, explicitly setting B's dust limit.
	let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
	accept_channel_msg.dust_limit_satoshis = 546;
	node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
	node_a_chan.context.holder_dust_limit_satoshis = 1560;

	// Node A --> Node B: funding created
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
		value: 10000000, script_pubkey: output_script.clone(),
	}]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
	let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

	// Node B --> Node A: funding signed
	let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();

	// Make sure that receiving a channel update will update the Channel as expected.
	let update = ChannelUpdate {
		contents: UnsignedChannelUpdate {
			chain_hash,
			short_channel_id: 0,
			timestamp: 0,
			flags: 0,
			cltv_expiry_delta: 100,
			htlc_minimum_msat: 5,
			htlc_maximum_msat: MAX_VALUE_MSAT,
			fee_base_msat: 110,
			fee_proportional_millionths: 11,
			excess_data: Vec::new(),
		},
		signature: Signature::from(unsafe { FFISignature::new() })
	};
	assert!(node_a_chan.channel_update(&update).unwrap());

	// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
	// change our official htlc_minimum_msat.
	assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
	match node_a_chan.context.counterparty_forwarding_info() {
		Some(info) => {
			assert_eq!(info.cltv_expiry_delta, 100);
			assert_eq!(info.fee_base_msat, 110);
			assert_eq!(info.fee_proportional_millionths, 11);
		},
		None => panic!("expected counterparty forwarding info to be Some")
	}

	assert!(!node_a_chan.channel_update(&update).unwrap());
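
	// `channel_update` returns true only when the counterparty's forwarding info
	// actually changed, so replaying the identical update above is a no-op and
	// the final assert expects false.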
}

#[test]
fn blinding_point_skimmed_fee_ser() {
	// Ensure that channel blinding points and skimmed fees are (de)serialized properly.
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let features = channelmanager::provided_init_features(&config);
	let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
	let mut chan = Channel { context: outbound_chan.context };

	let dummy_htlc_source = HTLCSource::OutboundRoute {
		path: Path {
			hops: vec![RouteHop {
				pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
				node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
				cltv_expiry_delta: 0, maybe_announced_channel: false,
			}],
			blinded_tail: None
		},
		session_priv: test_utils::privkey(42),
		first_hop_htlc_msat: 0,
		payment_id: PaymentId([42; 32]),
	};
	let dummy_outbound_output = OutboundHTLCOutput {
		htlc_id: 0,
		amount_msat: 0,
		payment_hash: PaymentHash([43; 32]),
		cltv_expiry: 0,
		state: OutboundHTLCState::Committed,
		source: dummy_htlc_source.clone(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
	for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
		if idx % 2 == 0 {
			htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
		}
		if idx % 3 == 0 {
			htlc.skimmed_fee_msat = Some(1);
		}
	}
	chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

	let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
		amount_msat: 0,
		cltv_expiry: 0,
		payment_hash: PaymentHash([43; 32]),
		source: dummy_htlc_source.clone(),
		onion_routing_packet: msgs::OnionPacket {
			version: 0,
			public_key: Ok(test_utils::pubkey(1)),
			hop_data: [0; 20*65],
			hmac: [0; 32],
		},
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
		payment_preimage: PaymentPreimage([42; 32]),
		htlc_id: 0,
	};
	let mut holding_cell_htlc_updates = Vec::with_capacity(10);
	for i in 0..10 {
		if i % 3 == 0 {
			holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
		} else if i % 3 == 1 {
			holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
		} else {
			let mut dummy_add = dummy_holding_cell_add_htlc.clone();
			if let HTLCUpdateAwaitingACK::AddHTLC {
				ref mut blinding_point, ref mut skimmed_fee_msat, ..
			} = &mut dummy_add {
				*blinding_point = Some(test_utils::pubkey(42 + i));
				*skimmed_fee_msat = Some(42);
			} else { panic!() }
			holding_cell_htlc_updates.push(dummy_add);
		}
	}
	chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

	// Encode and decode the channel and ensure that the HTLCs within are the same.
	let encoded_chan = chan.encode();
	let mut s = crate::io::Cursor::new(&encoded_chan);
	let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
	let features = channelmanager::provided_channel_type_features(&config);
	let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
	assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
	assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
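
	// Note (a sketch of the rationale): `skimmed_fee_msat` and `blinding_point`
	// are carried in optional TLVs appended to the channel serialization, so
	// comparing the full HTLC vectors after the round-trip confirms those
	// optional fields are neither dropped nor reordered on read.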
}

#[cfg(feature = "_test_vectors")]
#[test]
fn outbound_commitment_test() {
	use bitcoin::sighash;
	use bitcoin::consensus::encode::serialize;
	use bitcoin::sighash::EcdsaSighashType;
	use bitcoin::hashes::hex::FromHex;
	use bitcoin::hash_types::Txid;
	use bitcoin::secp256k1::Message;
	use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
	use crate::ln::PaymentPreimage;
	use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
	use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
	use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
	use crate::util::logger::Logger;
	use crate::sync::Arc;
	use core::str::FromStr;
	use hex::DisplayHex;

	// Test vectors from BOLT 3 Appendices C and F (anchors):
	let feeest = TestFeeEstimator{fee_est: 15000};
	let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
	let secp_ctx = Secp256k1::new();

	let mut signer = InMemorySigner::new(
		&secp_ctx,
		SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

		// These aren't set in the test vectors:
		[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
		10_000_000,
		[0; 32],
		[0; 32],
	);

	assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
	let keys_provider = Keys { signer: signer.clone() };

	let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut config = UserConfig::default();
	config.channel_handshake_config.announced_channel = false;
	let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

	let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

	let counterparty_pubkeys = ChannelPublicKeys {
		funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
		revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
		payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
		delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
		htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
	};
	chan.context.channel_transaction_parameters.counterparty_parameters = Some(
		CounterpartyChannelTransactionParameters {
			pubkeys: counterparty_pubkeys.clone(),
			selected_contest_delay: 144
		});
	chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
	signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

	assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

	assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
			<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

	assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

	// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
	// derived from a commitment_seed, so instead we copy it here and call
	// build_commitment_transaction.
	let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
	let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
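
	// `keys` now holds the per-commitment transaction keys: our delayed-payment
	// and HTLC basepoints plus the counterparty's revocation and HTLC basepoints,
	// each tweaked by the fixed per-commitment point above, matching the key
	// derivation described in BOLT 3.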

	macro_rules! test_commitment {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
			chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
			test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
		};
	}

	macro_rules! test_commitment_with_anchors {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
			chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
			test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
		};
	}

	macro_rules! test_commitment_common {
		( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
			$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
		} ) => { {
			let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
				let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

				let htlcs = commitment_stats.htlcs_included.drain(..)
					.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
					.collect();
				(commitment_stats.tx, htlcs)
			};
			let trusted_tx = commitment_tx.trust();
			let unsigned_tx = trusted_tx.built_transaction();
			let redeemscript = chan.context.get_funding_redeemscript();
			let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
			let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
			log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
			assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

			let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
			per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
			let mut counterparty_htlc_sigs = Vec::new();
			counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
			$({
				let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
				per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
				counterparty_htlc_sigs.push(remote_signature);
			})*
			assert_eq!(htlcs.len(), per_htlc.len());

			let holder_commitment_tx = HolderCommitmentTransaction::new(
				commitment_tx.clone(),
				counterparty_signature,
				counterparty_htlc_sigs,
				&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
				chan.context.counterparty_funding_pubkey()
			);
			let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
			assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

			let funding_redeemscript = chan.context.get_funding_redeemscript();
			let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
			assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

			// ((htlc, counterparty_sig), (index, holder_sig))
			let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

			$({
				log_trace!(logger, "verifying htlc {}", $htlc_idx);
				let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

				let ref htlc = htlcs[$htlc_idx];
				let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
					chan.context.get_counterparty_selected_contest_delay().unwrap(),
					&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
				let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
				assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

				let mut preimage: Option<PaymentPreimage> = None;
				if !htlc.offered {
					for i in 0..5 {
						let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
						if out == htlc.payment_hash {
							preimage = Some(PaymentPreimage([i; 32]));
						}
					}

					assert!(preimage.is_some());
				}

				let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
				let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
					channel_derivation_parameters: ChannelDerivationParameters {
						value_satoshis: chan.context.channel_value_satoshis,
						keys_id: chan.context.channel_keys_id,
						transaction_parameters: chan.context.channel_transaction_parameters.clone(),
					},
					commitment_txid: trusted_tx.txid(),
					per_commitment_number: trusted_tx.commitment_number(),
					per_commitment_point: trusted_tx.per_commitment_point(),
					feerate_per_kw: trusted_tx.feerate_per_kw(),
					htlc: htlc.clone(),
					preimage: preimage.clone(),
					counterparty_sig: *htlc_counterparty_sig,
				}, &secp_ctx).unwrap();
				let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
				assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
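
				// In these vectors the HTLC outputs sort directly after the two
				// anchor outputs when anchors are in use, hence the `+ num_anchors`
				// offset on the expected output index (zero with no anchors).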

				let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
				assert_eq!(signature, htlc_holder_sig, "htlc sig");
				let trusted_tx = holder_commitment_tx.trust();
				htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
				log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
				assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
			})*
			assert!(htlc_counterparty_sig_iter.next().is_none());
		} }
	}

	// anchors: simple commitment tx with no HTLCs and single anchor
	test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
		"3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// simple commitment tx with no HTLCs
	chan.context.value_to_self_msat = 7000000000;

	test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
		"30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	// anchors: simple commitment tx with no HTLCs
	test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
		"30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 0,
			amount_msat: 1000000,
			cltv_expiry: 500,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 1,
			amount_msat: 2000000,
			cltv_expiry: 501,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 2,
			amount_msat: 2000000,
			cltv_expiry: 502,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 3,
			amount_msat: 3000000,
			cltv_expiry: 503,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 4,
			amount_msat: 4000000,
			cltv_expiry: 504,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
		out
	});

	// commitment tx with all five HTLCs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 0;

	test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
		"304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
		"30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
		"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

		{ 1,
		"30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
		"3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
		"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		{ 2,
		"30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
		"3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
		"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		{ 3,
		"304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
		"304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
		"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 4,
		"3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
		"3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
		"02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	} );

	// commitment tx with seven outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 647;

	test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
		"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
		"30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
		"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

		{ 1,
		"304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
		"30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
		"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		{ 2,
		"30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
		"3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
		"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		{ 3,
		"304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
		"3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
		"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 4,
		"30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
		"3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
		"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	} );

	// commitment tx with six outputs untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 648;

	test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
		"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
		"304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
		"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

		{ 1,
		"304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
		"3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
		"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		{ 2,
		"304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
		"3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
		"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

		{ 3,
		"3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
		"3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
		"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
	} );
// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
"3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },

{ 1,
"3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
"3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

{ 2,
"3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
"304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 3,
"3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
"3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );
// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

{ 2,
"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 3,
"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

{ 1,
"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 2,
"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },

{ 1,
"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

{ 1,
"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
} );
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
} );
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with fee greater than funder amount
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type;
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
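// The two offered HTLCs round to the same on-chain amount and, sharing a payment
// preimage, have identical scripts, so BIP 69 ordering alone cannot distinguish them;
// BOLT 3 breaks the tie by ordering such outputs by increasing cltv_expiry, which is
// what these vectors exercise.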
chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.context.feerate_per_kw = 253;
chan.context.pending_inbound_htlcs.clear();
chan.context.pending_inbound_htlcs.push({
let mut out = InboundHTLCOutput{
htlc_id: 1,
amount_msat: 2000000,
cltv_expiry: 501,
payment_hash: PaymentHash([0; 32]),
state: InboundHTLCState::Committed,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.clear();
chan.context.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
htlc_id: 6,
amount_msat: 5000001,
cltv_expiry: 506,
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
htlc_id: 5,
amount_msat: 5000000,
cltv_expiry: 505,
payment_hash: PaymentHash([0; 32]),
state: OutboundHTLCState::Committed,
source: HTLCSource::dummy(),
skimmed_fee_msat: None,
blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
{ 1,
"3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
"3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
{ 2,
"30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
"304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
} );
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

{ 0,
"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
{ 1,
"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
{ 2,
"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
} );
}

#[test]
	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}
	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
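	// A minimal sketch of the public-key half of the BOLT 3 Appendix E derivation
	// exercised above: derived_pubkey = basepoint + SHA256(per_commitment_point ||
	// basepoint) * G. The function name is hypothetical; the production logic lives in
	// `chan_utils` and the typed keys in `ln::channel_keys`.
	#[allow(dead_code)]
	fn derive_public_key_sketch<T: secp256k1::Signing>(
		secp_ctx: &Secp256k1<T>, per_commitment_point: &PublicKey, base_point: &PublicKey,
	) -> PublicKey {
		// Hash the concatenation of the two serialized points.
		let mut input = Vec::with_capacity(66);
		input.extend_from_slice(&per_commitment_point.serialize());
		input.extend_from_slice(&base_point.serialize());
		let tweak = Sha256::hash(&input);
		// Interpret the hash as a scalar and multiply it by G by treating it as a secret
		// key, then add the resulting point to the basepoint.
		let tweak_point = PublicKey::from_secret_key(secp_ctx,
			&SecretKey::from_slice(&tweak.to_byte_array()).expect("a SHA256 output is a valid scalar with overwhelming probability"));
		base_point.combine(&tweak_point).expect("addition only fails for the point at infinity")
	}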
	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}
	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
		// sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();
		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}
	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		// (a sketch of this little-endian flag encoding follows this test).
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}
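	// A small sketch of the little-endian feature-flag encoding the two tests around it
	// rely on: feature bit `n` lives in byte `n / 8` (lowest byte first) at bit `n % 8`,
	// which is the layout `InitFeatures::from_le_bytes` and
	// `ChannelTypeFeatures::from_le_bytes` consume. The helper name is hypothetical.
	#[allow(dead_code)]
	fn raw_feature_bytes_sketch(bits: &[usize]) -> Vec<u8> {
		// Allocate enough bytes to hold the highest requested bit.
		let len = bits.iter().map(|b| b / 8 + 1).max().unwrap_or(0);
		let mut bytes = vec![0u8; len];
		for &bit in bits {
			bytes[bit / 8] |= 1u8 << (bit % 8);
		}
		// raw_feature_bytes_sketch(&[12, 20]) == the leading bytes of
		// ((1u64 << 12) | (1 << 20)).to_le_bytes(), as used in these tests.
		bytes
	}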
	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}
	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
			tx.clone(),
			funding_outpoint,
			true, // is_batch_funding
			&&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let _ = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32,
		);

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::WaitingForBatch as u32 |
			ChannelState::TheirChannelReady as u32,
		);

		// `ChannelState::WaitingForBatch` is only cleared by `set_batch_ready`, which
		// `ChannelManager` calls once every channel in the batch is ready.
		node_a_chan.set_batch_ready();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::FundingSent as u32 |
			ChannelState::TheirChannelReady as u32,
		);
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
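	// The `channel_state` assertions above combine `ChannelState` variants as bit flags
	// in a `u32`. A minimal sketch of the kind of membership check they perform, under
	// that flag encoding (the helper name is hypothetical):
	#[allow(dead_code)]
	fn is_waiting_for_batch_sketch(channel_state: u32) -> bool {
		(channel_state & ChannelState::WaitingForBatch as u32) != 0
	}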