// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
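// A minimal illustrative helper (an assumption, not part of the original file): given the
// `AvailableBalances` for a channel, clamp a desired outbound payment amount into the range
// the next HTLC must fall within, returning `None` if it cannot fit.
#[cfg(test)]
#[allow(unused)]
fn example_clamp_next_htlc_amount(balances: &AvailableBalances, desired_msat: u64) -> Option<u64> {
	if desired_msat < balances.next_outbound_htlc_minimum_msat {
		// Too small to be accepted as the next outbound HTLC.
		return None;
	}
	// Cap at the largest value the next outbound HTLC may carry.
	Some(cmp::min(desired_msat, balances.next_outbound_htlc_limit_msat))
}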
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
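// A minimal sketch (an assumption, not part of the original file) of how the states above are
// intended to be read: only `Committed` HTLCs (and `LocalRemoved` ones, which were necessarily
// `Committed` first) are an irrevocable part of the channel state on both sides.
#[cfg(test)]
#[allow(unused)]
fn example_inbound_htlc_is_committed(state: &InboundHTLCState) -> bool {
	match state {
		InboundHTLCState::RemoteAnnounced(_)
			| InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)
			| InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => false,
		InboundHTLCState::Committed | InboundHTLCState::LocalRemoved(_) => true,
	}
}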
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }

			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
	]
);
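// A minimal sketch (an assumption, not part of the original file) of how the generated flag
// types behave: flags combine with `|`, are tested with `is_set`, and are cleared by masking
// with the complement, exactly as the operator impls from `define_state_flags!` provide.
#[cfg(test)]
#[test]
fn example_funded_state_flag_ops() {
	let mut flags = FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS;
	assert!(flags.is_set(FundedStateFlags::PEER_DISCONNECTED));
	flags &= !FundedStateFlags::PEER_DISCONNECTED;
	assert!(!flags.is_set(FundedStateFlags::PEER_DISCONNECTED));
	assert!(flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS));
}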
define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
	]
);
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}
	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
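// A minimal sketch (an assumption, not part of the original file): `to_u32`/`from_u32`
// round-trip a `ChannelState`, with the variant bit and per-variant flags packed into one u32.
#[cfg(test)]
#[test]
fn example_channel_state_u32_roundtrip() {
	let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::OUR_CHANNEL_READY);
	let encoded = state.to_u32();
	// The variant bit and the flag bit are both present in the encoding.
	assert_eq!(encoded, state_flags::AWAITING_CHANNEL_READY | state_flags::OUR_CHANNEL_READY);
	assert_eq!(ChannelState::from_u32(encoded), Ok(state));
}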
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}
#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
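// A minimal sketch (an assumption, not part of the original file) of how these weights turn
// into a commitment transaction fee: weight scales linearly in the non-dust HTLC count, and
// feerates are expressed per 1000 weight units.
#[cfg(test)]
#[allow(unused)]
fn example_commit_tx_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: usize, channel_type: &ChannelTypeFeatures) -> u64 {
	let weight = commitment_tx_base_weight(channel_type)
		+ num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	feerate_per_kw as u64 * weight / 1000
}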
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
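// A worked sketch (an assumption, not part of the original file): under the legacy default, a
// 1_000_000 sat channel would allow up to 1_000_000 * 1000 * 10 / 100 = 100_000_000 msat in
// flight.
#[cfg(test)]
#[allow(unused)]
fn example_legacy_max_in_flight_msat(channel_value_satoshis: u64) -> u64 {
	channel_value_satoshis * 1000 * MAX_IN_FLIGHT_PERCENT_LEGACY as u64 / 100
}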
/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
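// A worked sketch (an assumption, not part of the original file) of where 354 comes from: a
// closing transaction may pay to any segwit script of up to 42 bytes, so the output serializes
// to 8 (value) + 1 (script length) + 42 = 51 bytes. Bitcoin Core's dust rule adds 67 vbytes for
// a hypothetical segwit spend of the output and applies the 3 sat/vbyte dust relay feerate.
#[cfg(test)]
#[test]
fn example_min_dust_limit_derivation() {
	let output_bytes: u64 = 8 + 1 + 42;
	let segwit_spend_vbytes: u64 = 67;
	assert_eq!((output_bytes + segwit_spend_vbytes) * 3, MIN_CHAN_DUST_LIMIT_SATOSHIS);
}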
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}
impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}
impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
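// A minimal sketch (an assumption, not part of the original file): `Display` renders only the
// message, while `Debug` prefixes the severity, per the impls above.
#[cfg(test)]
#[test]
fn example_channel_error_formatting() {
	let err = ChannelError::Warn("feerate too low".to_owned());
	assert_eq!(format!("{}", err), "feerate too low");
	assert_eq!(format!("{:?}", err), "Warn : feerate too low");
}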
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}
impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}
impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
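// A minimal usage sketch (an assumption, not part of the original file): records logged through
// the wrapper pick up the stored peer and channel ids before reaching the inner logger.
#[cfg(test)]
#[allow(unused)]
fn example_with_channel_context<'a, L: Deref>(logger: &'a L, peer_id: PublicKey, channel_id: ChannelId) -> WithChannelContext<'a, L>
where L::Target: Logger {
	WithChannelContext { logger, peer_id: Some(peer_id), channel_id: Some(channel_id) }
}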
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(Debug)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
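// A minimal sketch (an assumption, not part of the original file): before adding an HTLC as the
// channel initiator, affordability is judged against the commitment fee at a feerate already
// multiplied by the buffer above, so a 2x feerate spike cannot leave us unable to pay.
#[cfg(test)]
#[allow(unused)]
fn example_fee_spike_buffered_commit_fee_sat(feerate_per_kw: u32, num_nondust_htlcs: usize, channel_type: &ChannelTypeFeatures) -> u64 {
	let buffered_feerate = feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
	let weight = commitment_tx_base_weight(channel_type)
		+ num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	buffered_feerate * weight / 1000
}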
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
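// A worked sketch (not part of the original file) of the arithmetic above: one-minute ticks and
// ~300 seconds of average network-wide propagation give 300 / 60 = 5 ticks.
#[cfg(test)]
#[test]
fn example_expire_prev_config_ticks_derivation() {
	let convergence_delay_secs: usize = 300;
	let tick_interval_secs: usize = 60;
	assert_eq!(convergence_delay_secs / tick_interval_secs, EXPIRE_PREV_CONFIG_TICKS);
}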
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
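// A minimal usage sketch (an assumption, not part of the original file): `context()` lets
// callers read state common to every phase without matching on the phase themselves.
#[cfg(test)]
#[allow(unused)]
fn example_channel_phase_id<SP: Deref>(phase: &ChannelPhase<SP>) -> ChannelId
where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	phase.context().channel_id()
}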
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If this unfunded channel reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`
	/// without progressing, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}
impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
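// A minimal sketch (an assumption, not part of the original file): the counter is bumped once
// per timer tick and trips exactly when the age limit is reached.
#[cfg(test)]
#[test]
fn example_unfunded_channel_expiry() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	assert!(ctx.should_expire_unfunded_channel());
}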
/// Contains everything about the channel including its state and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,
	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,
	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. I.e., when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,
	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,
	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,
	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,
	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,
	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,
	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,
	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,
	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,
	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
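// A minimal sketch (an assumption, not part of the original file): the commitment transaction
// numbers above count *down* from INITIAL_COMMITMENT_NUMBER, while BOLT-level commitment
// indices count *up* from zero, so converting between the two is a subtraction.
#[cfg(test)]
#[allow(unused)]
fn example_commitment_index(cur_commitment_transaction_number: u64) -> u64 {
	INITIAL_COMMITMENT_NUMBER - cur_commitment_transaction_number
}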
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}
	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}
	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
	/// shutdown state returns the state of the channel in its various stages of shutdown
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		match self.channel_state {
			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
					ChannelShutdownState::ShutdownInitiated
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
					ChannelShutdownState::ResolvingHTLCs
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
					ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
			_ => ChannelShutdownState::NotShuttingDown,
		}
	}
1289 fn closing_negotiation_ready(&self) -> bool {
1290 let is_ready_to_close = match self.channel_state {
1291 ChannelState::AwaitingChannelReady(flags) =>
1292 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1293 ChannelState::ChannelReady(flags) =>
1294 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1297 self.pending_inbound_htlcs.is_empty() &&
1298 self.pending_outbound_htlcs.is_empty() &&
1299 self.pending_update_fee.is_none() &&
1303 /// Returns true if this channel is currently available for use. This is a superset of
1304 /// is_usable() and considers things like the channel being temporarily disabled.
1305 /// Allowed in any state (including after shutdown)
1306 pub fn is_live(&self) -> bool {
1307 self.is_usable() && !self.channel_state.is_peer_disconnected()
1310 // Public utilities:
1312 pub fn channel_id(&self) -> ChannelId {
1316 /// Return the `temporary_channel_id` used during channel establishment.
1318 /// Will return `None` for channels created prior to LDK version 0.0.115.
1319 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1320 self.temporary_channel_id
1323 pub fn minimum_depth(&self) -> Option<u32> {
1327 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1328 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1329 pub fn get_user_id(&self) -> u128 {
1333 /// Gets the channel's type
1334 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1338 /// Gets the channel's `short_channel_id`.
1340 /// Will return `None` if the channel hasn't been confirmed yet.
1341 pub fn get_short_channel_id(&self) -> Option<u64> {
1342 self.short_channel_id
1345 /// Allowed in any state (including after shutdown)
1346 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1347 self.latest_inbound_scid_alias
1350 /// Allowed in any state (including after shutdown)
1351 pub fn outbound_scid_alias(&self) -> u64 {
1352 self.outbound_scid_alias
1355 /// Returns the holder signer for this channel.
1357 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1358 &self.holder_signer
1361 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1362 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1363 /// or prior to any channel actions during `Channel` initialization.
1364 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1365 debug_assert_eq!(self.outbound_scid_alias, 0);
1366 self.outbound_scid_alias = outbound_scid_alias;
1369 /// Returns the funding_txo we either got from our peer, or were given by
1370 /// get_funding_created.
1371 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1372 self.channel_transaction_parameters.funding_outpoint
1375 /// Returns the height in which our funding transaction was confirmed.
1376 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1377 let conf_height = self.funding_tx_confirmation_height;
1378 if conf_height > 0 {
1385 /// Returns the block hash in which our funding transaction was confirmed.
1386 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1387 self.funding_tx_confirmed_in
1390 /// Returns the current number of confirmations on the funding transaction.
1391 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1392 if self.funding_tx_confirmation_height == 0 {
1393 // We either haven't seen any confirmation yet, or observed a reorg.
1397 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
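// Worked example (editorial): if the funding transaction confirmed at height 100 and
// the current height is 105, checked_sub yields 5 and we report 5 + 1 = 6
// confirmations, counting the confirmation block itself.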
1400 fn get_holder_selected_contest_delay(&self) -> u16 {
1401 self.channel_transaction_parameters.holder_selected_contest_delay
1404 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1405 &self.channel_transaction_parameters.holder_pubkeys
1408 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1409 self.channel_transaction_parameters.counterparty_parameters
1410 .as_ref().map(|params| params.selected_contest_delay)
1413 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1414 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1417 /// Allowed in any state (including after shutdown)
1418 pub fn get_counterparty_node_id(&self) -> PublicKey {
1419 self.counterparty_node_id
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1424 self.holder_htlc_minimum_msat
1427 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1428 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1429 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1432 /// Allowed in any state (including after shutdown)
1433 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1435 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1436 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1437 // the channel might have been used to route very small values (either by honest users or as DoS).
1438 self.channel_value_satoshis * 1000 * 9 / 10,
1440 self.counterparty_max_htlc_value_in_flight_msat
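// Editorial illustration with hypothetical numbers: on a 1_000_000 sat channel the
// capacity bound above is 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat, so we never
// announce more than 90% of the channel value even if the counterparty's
// max-in-flight limit is higher.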
1444 /// Allowed in any state (including after shutdown)
1445 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1446 self.counterparty_htlc_minimum_msat
1449 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1450 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1451 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1454 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1455 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1456 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1458 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1459 party_max_htlc_value_in_flight_msat
1464 pub fn get_value_satoshis(&self) -> u64 {
1465 self.channel_value_satoshis
1468 pub fn get_fee_proportional_millionths(&self) -> u32 {
1469 self.config.options.forwarding_fee_proportional_millionths
1472 pub fn get_cltv_expiry_delta(&self) -> u16 {
1473 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1476 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1477 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1478 where F::Target: FeeEstimator
1480 match self.config.options.max_dust_htlc_exposure {
1481 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1482 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1483 ConfirmationTarget::OnChainSweep) as u64;
1484 feerate_per_kw.saturating_mul(multiplier)
1486 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
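// Editorial sketch, hypothetical numbers: with MaxDustHTLCExposure::FeeRateMultiplier(5_000)
// and an estimator returning 2_500 sat/kW for OnChainSweep, the exposure cap works out to
// 2_500 * 5_000 = 12_500_000 msat, while FixedLimitMsat simply returns the configured limit.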
1490 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1491 pub fn prev_config(&self) -> Option<ChannelConfig> {
1492 self.prev_config.map(|prev_config| prev_config.0)
1495 // Checks whether we should emit a `ChannelPending` event.
1496 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1497 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1500 // Returns whether we already emitted a `ChannelPending` event.
1501 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1502 self.channel_pending_event_emitted
1505 // Remembers that we already emitted a `ChannelPending` event.
1506 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1507 self.channel_pending_event_emitted = true;
1510 // Checks whether we should emit a `ChannelReady` event.
1511 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1512 self.is_usable() && !self.channel_ready_event_emitted
1515 // Remembers that we already emitted a `ChannelReady` event.
1516 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1517 self.channel_ready_event_emitted = true;
1520 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1521 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1522 /// no longer be considered when forwarding HTLCs.
1523 pub fn maybe_expire_prev_config(&mut self) {
1524 if self.prev_config.is_none() {
1527 let prev_config = self.prev_config.as_mut().unwrap();
1529 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1530 self.prev_config = None;
1534 /// Returns the current [`ChannelConfig`] applied to the channel.
1535 pub fn config(&self) -> ChannelConfig {
1539 /// Updates the channel's config. Returns a bool indicating whether applying the update
1540 /// resulted in a new ChannelUpdate message.
1541 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1542 let did_channel_update =
1543 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1544 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1545 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1546 if did_channel_update {
1547 self.prev_config = Some((self.config.options, 0));
1548 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1549 // policy change to propagate throughout the network.
1550 self.update_time_counter += 1;
1552 self.config.options = *config;
1556 /// Returns true if funding_signed was sent/received and the
1557 /// funding transaction has been broadcast if necessary.
1558 pub fn is_funding_broadcast(&self) -> bool {
1559 !self.channel_state.is_pre_funded_state() &&
1560 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1563 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1564 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1565 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1566 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1567 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1569 /// @local is used only to convert relevant internal structures which refer to remote vs local
1570 /// to decide the value of outputs and the direction of HTLCs.
1571 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1572 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1573 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1574 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1575 /// which peer generated this transaction and "to whom" this transaction flows.
1577 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1578 where L::Target: Logger
1580 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1581 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1582 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1584 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1585 let mut remote_htlc_total_msat = 0;
1586 let mut local_htlc_total_msat = 0;
1587 let mut value_to_self_msat_offset = 0;
1589 let mut feerate_per_kw = self.feerate_per_kw;
1590 if let Some((feerate, update_state)) = self.pending_update_fee {
1591 if match update_state {
1592 // Note that these match the inclusion criteria when scanning
1593 // pending_inbound_htlcs below.
1594 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1595 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1596 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1598 feerate_per_kw = feerate;
1602 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1603 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1604 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1606 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
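// Editorial note: the "really {} xor {}" above refers to BOLT 3's commitment number
// obscuring, where the 48-bit commitment number is XORed with the lower 48 bits of
// SHA256(open_channel payment_basepoint || accept_channel payment_basepoint) before
// being packed into the transaction's locktime and sequence fields.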
1608 macro_rules! get_htlc_in_commitment {
1609 ($htlc: expr, $offered: expr) => {
1610 HTLCOutputInCommitment {
1612 amount_msat: $htlc.amount_msat,
1613 cltv_expiry: $htlc.cltv_expiry,
1614 payment_hash: $htlc.payment_hash,
1615 transaction_output_index: None
1620 macro_rules! add_htlc_output {
1621 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1622 if $outbound == local { // "offered HTLC output"
1623 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1624 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1627 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1629 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1630 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1631 included_non_dust_htlcs.push((htlc_in_tx, $source));
1633 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1634 included_dust_htlcs.push((htlc_in_tx, $source));
1637 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1638 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1641 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1643 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1644 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1645 included_non_dust_htlcs.push((htlc_in_tx, $source));
1647 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1648 included_dust_htlcs.push((htlc_in_tx, $source));
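// Editorial sketch with hypothetical numbers (non-anchor HTLC-timeout weight of 663 WU,
// HTLC-success weight of 703 WU, broadcaster dust limit of 546 sat): at 2_500 sat/kW an
// offered HTLC is dust below 546 + 2_500 * 663 / 1000 = 2_203 sat, while a received HTLC
// is dust below 546 + 2_500 * 703 / 1000 = 2_303 sat.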
1654 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1656 for ref htlc in self.pending_inbound_htlcs.iter() {
1657 let (include, state_name) = match htlc.state {
1658 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1659 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1660 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1661 InboundHTLCState::Committed => (true, "Committed"),
1662 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1666 add_htlc_output!(htlc, false, None, state_name);
1667 remote_htlc_total_msat += htlc.amount_msat;
1669 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1671 &InboundHTLCState::LocalRemoved(ref reason) => {
1672 if generated_by_local {
1673 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1674 inbound_htlc_preimages.push(preimage);
1675 value_to_self_msat_offset += htlc.amount_msat as i64;
1685 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1687 for ref htlc in self.pending_outbound_htlcs.iter() {
1688 let (include, state_name) = match htlc.state {
1689 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1690 OutboundHTLCState::Committed => (true, "Committed"),
1691 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1692 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1693 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1696 let preimage_opt = match htlc.state {
1697 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1698 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1699 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1703 if let Some(preimage) = preimage_opt {
1704 outbound_htlc_preimages.push(preimage);
1708 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1709 local_htlc_total_msat += htlc.amount_msat;
1711 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1713 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1714 value_to_self_msat_offset -= htlc.amount_msat as i64;
1716 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1717 if !generated_by_local {
1718 value_to_self_msat_offset -= htlc.amount_msat as i64;
1726 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1727 assert!(value_to_self_msat >= 0);
1728 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1729 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1730 // "violate" their reserve value by couting those against it. Thus, we have to convert
1731 // everything to i64 before subtracting as otherwise we can overflow.
1732 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1733 assert!(value_to_remote_msat >= 0);
1735 #[cfg(debug_assertions)]
1737 // Make sure that the to_self/to_remote is always either past the appropriate
1738 // channel_reserve *or* it is making progress towards it.
1739 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1740 self.holder_max_commitment_tx_output.lock().unwrap()
1742 self.counterparty_max_commitment_tx_output.lock().unwrap()
1744 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1745 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1746 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1747 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1750 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1751 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1752 let (value_to_self, value_to_remote) = if self.is_outbound() {
1753 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1755 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1758 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1759 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1760 let (funding_pubkey_a, funding_pubkey_b) = if local {
1761 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1763 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1766 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1767 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1772 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1773 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1778 let num_nondust_htlcs = included_non_dust_htlcs.len();
1780 let channel_parameters =
1781 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1782 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1783 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1790 &mut included_non_dust_htlcs,
1793 let mut htlcs_included = included_non_dust_htlcs;
1794 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1795 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1796 htlcs_included.append(&mut included_dust_htlcs);
1798 // For the stats, trim the value to 0 msat if it falls below the broadcaster's dust limit
1799 value_to_self_msat = if value_to_self_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_self_msat };
1800 value_to_remote_msat = if value_to_remote_msat < (broadcaster_dust_limit_satoshis * 1000) as i64 { 0 } else { value_to_remote_msat };
1808 local_balance_msat: value_to_self_msat as u64,
1809 remote_balance_msat: value_to_remote_msat as u64,
1810 inbound_htlc_preimages,
1811 outbound_htlc_preimages,
1816 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1817 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1818 /// our counterparty!)
1819 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1820 /// TODO Some magic rust shit to compile-time check this?
1821 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1822 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1823 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1824 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1825 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1827 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1831 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1832 /// will sign and send to our counterparty.
1833 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1834 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1835 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1836 //may see payments to it!
1837 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1838 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1839 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1841 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1844 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1845 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1846 /// Panics if called before accept_channel/InboundV1Channel::new
1847 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1848 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1851 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1852 &self.get_counterparty_pubkeys().funding_pubkey
1855 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1859 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1860 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1861 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1862 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1863 // more dust balance if the feerate increases when we have several HTLCs pending
1864 // which are near the dust limit.
1865 let mut feerate_per_kw = self.feerate_per_kw;
1866 // If there's a pending update fee, use it to ensure we aren't under-estimating
1867 // potential feerate updates coming soon.
1868 if let Some((feerate, _)) = self.pending_update_fee {
1869 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1871 if let Some(feerate) = outbound_feerate_update {
1872 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1874 cmp::max(2530, feerate_per_kw * 1250 / 1000)
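// Worked example (editorial): at a current feerate of 10_000 sat/kW the buffer feerate is
// max(2530, 10_000 * 1250 / 1000) = 12_500 sat/kW (the 25% bump dominates), while at
// 1_000 sat/kW it is max(2530, 1_250) = 2_530 sat/kW (the absolute floor dominates).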
1877 /// Get forwarding information for the counterparty.
1878 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1879 self.counterparty_forwarding_info.clone()
1882 /// Returns an HTLCStats about pending inbound HTLCs
1883 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1884 let context = &self;
1885 let mut stats = HTLCStats {
1886 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1887 pending_htlcs_value_msat: 0,
1888 on_counterparty_tx_dust_exposure_msat: 0,
1889 on_holder_tx_dust_exposure_msat: 0,
1890 holding_cell_msat: 0,
1891 on_holder_tx_holding_cell_htlcs_count: 0,
1894 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1897 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1898 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1899 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1901 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1902 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1903 for ref htlc in context.pending_inbound_htlcs.iter() {
1904 stats.pending_htlcs_value_msat += htlc.amount_msat;
1905 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1906 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1908 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1909 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1915 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1916 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1917 let context = &self;
1918 let mut stats = HTLCStats {
1919 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1920 pending_htlcs_value_msat: 0,
1921 on_counterparty_tx_dust_exposure_msat: 0,
1922 on_holder_tx_dust_exposure_msat: 0,
1923 holding_cell_msat: 0,
1924 on_holder_tx_holding_cell_htlcs_count: 0,
1927 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1930 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1931 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1932 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1934 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1935 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1936 for ref htlc in context.pending_outbound_htlcs.iter() {
1937 stats.pending_htlcs_value_msat += htlc.amount_msat;
1938 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1939 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1941 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1942 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1946 for update in context.holding_cell_htlc_updates.iter() {
1947 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1948 stats.pending_htlcs += 1;
1949 stats.pending_htlcs_value_msat += amount_msat;
1950 stats.holding_cell_msat += amount_msat;
1951 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1952 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1954 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1955 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1957 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1964 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1965 /// Doesn't bother handling the
1966 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1967 /// corner case properly.
1968 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1969 -> AvailableBalances
1970 where F::Target: FeeEstimator
1972 let context = &self;
1973 // Note that we have to handle overflow due to the above case.
1974 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1975 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1977 let mut balance_msat = context.value_to_self_msat;
1978 for ref htlc in context.pending_inbound_htlcs.iter() {
1979 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1980 balance_msat += htlc.amount_msat;
1983 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1985 let outbound_capacity_msat = context.value_to_self_msat
1986 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1988 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1990 let mut available_capacity_msat = outbound_capacity_msat;
1992 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1993 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1997 if context.is_outbound() {
1998 // We should mind channel commit tx fee when computing how much of the available capacity
1999 // can be used in the next htlc. Mirrors the logic in send_htlc.
2001 // The fee depends on whether the amount we will be sending is above dust or not,
2002 // and the answer will in turn change the amount itself, making it a circular
2003 // dependency.
2004 // This complicates the computation around dust-values, up to the one-htlc-value.
2005 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2006 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2007 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2010 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2011 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2012 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2013 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2014 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2015 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2016 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2019 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2020 // value ends up being below dust, we have this fee available again. In that case,
2021 // match the value to right-below-dust.
2022 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2023 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2024 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2025 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2026 debug_assert!(one_htlc_difference_msat != 0);
2027 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2028 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2029 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2031 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
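// Editorial sketch of the branch above, hypothetical numbers: with an above-dust fee
// reserve of 20_000 msat, a below-dust reserve of 15_000 msat and a dust limit of
// 10_000 sat, a capacity that falls below 10_000_000 msat after subtracting the full
// 20_000 msat gets the 5_000 msat difference back and is then clamped to 9_999_999 msat,
// the largest value that still counts as dust.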
2034 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2035 // sending a new HTLC won't reduce their balance below our reserve threshold.
2036 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2037 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2038 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2041 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2042 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2044 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2045 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2046 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2048 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2049 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2050 // we've selected for them, we can only send dust HTLCs.
2051 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2055 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2057 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2058 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2059 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2060 // send above the dust limit (as the router can always overpay to meet the dust limit).
2061 let mut remaining_msat_below_dust_exposure_limit = None;
2062 let mut dust_exposure_dust_limit_msat = 0;
2063 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2065 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2066 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2068 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2069 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2070 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2072 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2073 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2074 remaining_msat_below_dust_exposure_limit =
2075 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2076 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2079 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2080 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2081 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2082 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2083 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2084 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2087 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2088 if available_capacity_msat < dust_exposure_dust_limit_msat {
2089 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2091 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2095 available_capacity_msat = cmp::min(available_capacity_msat,
2096 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2098 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2099 available_capacity_msat = 0;
2103 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2104 - context.value_to_self_msat as i64
2105 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2106 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2108 outbound_capacity_msat,
2109 next_outbound_htlc_limit_msat: available_capacity_msat,
2110 next_outbound_htlc_minimum_msat,
2115 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2116 let context = &self;
2117 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2120 /// Get the commitment tx fee for the holder's (i.e. our) next commitment transaction based on the
2121 /// number of pending HTLCs that are on track to be in our next commitment tx.
2123 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2124 /// `fee_spike_buffer_htlc` is `Some`.
2126 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2127 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2129 /// Dust HTLCs are excluded.
2130 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2131 let context = &self;
2132 assert!(context.is_outbound());
2134 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2137 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2138 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2140 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2141 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2143 let mut addl_htlcs = 0;
2144 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2146 HTLCInitiator::LocalOffered => {
2147 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2151 HTLCInitiator::RemoteOffered => {
2152 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2158 let mut included_htlcs = 0;
2159 for ref htlc in context.pending_inbound_htlcs.iter() {
2160 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2163 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2164 // transaction including this HTLC if it times out before they RAA.
2165 included_htlcs += 1;
2168 for ref htlc in context.pending_outbound_htlcs.iter() {
2169 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2173 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2174 OutboundHTLCState::Committed => included_htlcs += 1,
2175 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2176 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2177 // transaction won't be generated until they send us their next RAA, which will mean
2178 // dropping any HTLCs in this state.
2183 for htlc in context.holding_cell_htlc_updates.iter() {
2185 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2186 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2191 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2192 // ack we're guaranteed to never include them in commitment txs anymore.
2196 let num_htlcs = included_htlcs + addl_htlcs;
2197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2198 #[cfg(any(test, fuzzing))]
2201 if fee_spike_buffer_htlc.is_some() {
2202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2205 + context.holding_cell_htlc_updates.len();
2206 let commitment_tx_info = CommitmentTxInfoCached {
2208 total_pending_htlcs,
2209 next_holder_htlc_id: match htlc.origin {
2210 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2211 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2213 next_counterparty_htlc_id: match htlc.origin {
2214 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2215 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2217 feerate: context.feerate_per_kw,
2219 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2224 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2225 /// pending HTLCs that are on track to be in their next commitment tx
2227 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2228 /// `fee_spike_buffer_htlc` is `Some`.
2230 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2231 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2233 /// Dust HTLCs are excluded.
2234 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2235 let context = &self;
2236 assert!(!context.is_outbound());
2238 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2241 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2242 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2244 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2245 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2247 let mut addl_htlcs = 0;
2248 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2250 HTLCInitiator::LocalOffered => {
2251 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2255 HTLCInitiator::RemoteOffered => {
2256 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2262 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2263 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
2264 // committed outbound HTLCs, see below.
2265 let mut included_htlcs = 0;
2266 for ref htlc in context.pending_inbound_htlcs.iter() {
2267 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2270 included_htlcs += 1;
2273 for ref htlc in context.pending_outbound_htlcs.iter() {
2274 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2277 // We only include outbound HTLCs if they will not be included in their next commitment_signed,
2278 // i.e. if they've responded to us with an RAA after announcement.
2280 OutboundHTLCState::Committed => included_htlcs += 1,
2281 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2282 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2287 let num_htlcs = included_htlcs + addl_htlcs;
2288 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2289 #[cfg(any(test, fuzzing))]
2292 if fee_spike_buffer_htlc.is_some() {
2293 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2295 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2296 let commitment_tx_info = CommitmentTxInfoCached {
2298 total_pending_htlcs,
2299 next_holder_htlc_id: match htlc.origin {
2300 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2301 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2303 next_counterparty_htlc_id: match htlc.origin {
2304 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2305 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2307 feerate: context.feerate_per_kw,
2309 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2314 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2315 where F: Fn() -> Option<O> {
2316 match self.channel_state {
2317 ChannelState::FundingNegotiated => f(),
2318 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2327 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2329 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2330 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2333 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2335 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2336 self.if_unbroadcasted_funding(||
2337 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2341 /// Returns whether the channel is funded in a batch.
2342 pub fn is_batch_funding(&self) -> bool {
2343 self.is_batch_funding.is_some()
2346 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2348 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2349 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2352 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2353 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2354 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2355 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2356 /// immediately (others we will have to allow to time out).
2357 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2358 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2359 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2360 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2361 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2362 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2364 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2365 // return them to fail the payment.
2366 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2367 let counterparty_node_id = self.get_counterparty_node_id();
2368 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2370 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2371 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2376 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2377 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2378 // returning a channel monitor update here would imply a channel monitor update before
2379 // we even registered the channel monitor to begin with, which is invalid.
2380 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2381 // funding transaction, don't return a funding txo (which prevents providing the
2382 // monitor update to the user, even if we return one).
2383 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2384 let generate_monitor_update = match self.channel_state {
2385 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2388 if generate_monitor_update {
2389 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2390 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2391 update_id: self.latest_monitor_update_id,
2392 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2396 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2398 self.channel_state = ChannelState::ShutdownComplete;
2399 self.update_time_counter += 1;
2402 dropped_outbound_htlcs,
2403 unbroadcasted_batch_funding_txid,
2404 channel_id: self.channel_id,
2405 counterparty_node_id: self.counterparty_node_id,
2409 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2410 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2411 let counterparty_keys = self.build_remote_transaction_keys();
2412 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2413 let signature = match &self.holder_signer {
2414 // TODO (taproot|arik): move match into calling method for Taproot
2415 ChannelSignerType::Ecdsa(ecdsa) => {
2416 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2417 .map(|(sig, _)| sig).ok()?
2419 // TODO (taproot|arik)
2424 if self.signer_pending_funding {
2425 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2426 self.signer_pending_funding = false;
2429 Some(msgs::FundingCreated {
2430 temporary_channel_id: self.temporary_channel_id.unwrap(),
2431 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2432 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2435 partial_signature_with_nonce: None,
2437 next_local_nonce: None,
2441 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2442 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2443 let counterparty_keys = self.build_remote_transaction_keys();
2444 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2446 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2447 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2448 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2449 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2451 match &self.holder_signer {
2452 // TODO (arik): move match into calling method for Taproot
2453 ChannelSignerType::Ecdsa(ecdsa) => {
2454 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2455 .map(|(signature, _)| msgs::FundingSigned {
2456 channel_id: self.channel_id(),
2459 partial_signature_with_nonce: None,
2463 if funding_signed.is_none() {
2464 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2465 self.signer_pending_funding = true;
2466 } else if self.signer_pending_funding {
2467 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2468 self.signer_pending_funding = false;
2471 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2472 (counterparty_initial_commitment_tx, funding_signed)
2474 // TODO (taproot|arik)
2481 // Internal utility functions for channels
2483 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2484 /// `channel_value_satoshis` in msat, set through
2485 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2487 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2489 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2490 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2491 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2493 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2496 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2498 channel_value_satoshis * 10 * configured_percent
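// Editorial sketch (not part of the original file): the `* 10` above folds together the
// sat -> msat conversion (* 1000) and the percentage (/ 100).
#[cfg(test)]
mod max_in_flight_percent_sketch {
	#[test]
	fn ten_percent_of_a_one_million_sat_channel() {
		let channel_value_satoshis: u64 = 1_000_000;
		let configured_percent: u64 = 10;
		// 10% of 1_000_000 sat is 100_000 sat, i.e. 100_000_000 msat.
		assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000);
	}
}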
2501 /// Returns a minimum channel reserve value the remote needs to maintain,
2502 /// required by us according to the configured or default
2503 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2505 /// Guaranteed to return a value no larger than channel_value_satoshis
2507 /// This is used both for outbound and inbound channels and has lower bound
2508 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2509 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2510 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2511 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
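// Worked example (editorial, hypothetical config): with
// their_channel_reserve_proportional_millionths = 10_000 (i.e. 1%), a 1_000_000 sat
// channel yields 1_000_000 * 10_000 / 1_000_000 = 10_000 sat, floored at
// MIN_THEIR_CHAN_RESERVE_SATOSHIS and capped at the full channel value for tiny channels.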
2514 /// This is for legacy reasons, present for forward-compatibility.
2515 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2516 /// from storage. Hence, we use this function to not persist default values of
2517 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2518 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2519 let (q, _) = channel_value_satoshis.overflowing_div(100);
2520 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
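// Worked example (editorial): a 250_000 sat channel yields max(2_500, 1_000) = 2_500 sat,
// a 50_000 sat channel yields max(500, 1_000) = 1_000 sat, and a 600 sat channel is
// capped at min(600, 1_000) = 600 sat.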
2523 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2524 // Note that num_htlcs should not include dust HTLCs.
2526 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2527 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2530 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2531 // Note that num_htlcs should not include dust HTLCs.
2532 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2533 // Note that we need to divide before multiplying to round properly,
2534 // since the lowest denomination of bitcoin on-chain is the satoshi.
2535 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
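// Editorial sketch (not part of the original file): the divide-before-multiply above
// floors the fee to a whole number of satoshis. The weights assume the non-anchor
// constants (724 WU base, 172 WU per non-dust HTLC) used elsewhere in this file.
#[cfg(test)]
mod commit_tx_fee_rounding_sketch {
	#[test]
	fn fee_is_floored_to_whole_sats() {
		let feerate_per_kw: u64 = 253;
		let weight: u64 = 724 + 172; // base weight plus one non-dust HTLC
		// 896 * 253 = 226_688, floored to 226 sat and returned as 226_000 msat.
		assert_eq!(weight * feerate_per_kw / 1000 * 1000, 226_000);
	}
}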
2538 // Holder designates channel data owned for the benefit of the user client.
2539 // Counterparty designates channel data owned by the other channel participant.
2540 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2541 pub context: ChannelContext<SP>,
2544 #[cfg(any(test, fuzzing))]
2545 struct CommitmentTxInfoCached {
2546 fee: u64,
2547 total_pending_htlcs: usize,
2548 next_holder_htlc_id: u64,
2549 next_counterparty_htlc_id: u64,
2550 feerate: u32,
2551 }
2553 impl<SP: Deref> Channel<SP> where
2554 SP::Target: SignerProvider,
2555 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2557 fn check_remote_fee<F: Deref, L: Deref>(
2558 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2559 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2560 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2562 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2563 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2564 } else {
2565 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2566 };
2567 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2568 if feerate_per_kw < lower_limit {
2569 if let Some(cur_feerate) = cur_feerate_per_kw {
2570 if feerate_per_kw > cur_feerate {
2572 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2573 cur_feerate, feerate_per_kw);
2577 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2583 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2584 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2585 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2586 // outside of those situations will fail.
2587 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2591 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2592 let mut ret =
2593 (4 + // version
2594 1 + // input count
2595 36 + // prevout
2596 1 + // script length (0)
2597 4 + // sequence
2598 4 + // lock time
2599 1 + // output count
2600 )*4 + // * 4 for non-witness parts
2601 2 + // witness marker and flag
2602 1 + // witness element count
2603 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2604 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2605 2*(1 + 71); // two signatures + sighash type flags
2606 if let Some(spk) = a_scriptpubkey {
2607 ret += ((8+1) + // output values and script length
2608 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2610 if let Some(spk) = b_scriptpubkey {
2611 ret += ((8+1) + // output values and script length
2612 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2613 }
2614 ret
2615 }
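// Illustrative numbers for the estimate above: a 22-byte P2WPKH scriptpubkey adds
// (8 + 1 + 22) * 4 = 124 weight per output, and 71 bytes is a worst-case DER signature
// (plus sighash flag), so this deliberately over-estimates the weight slightly rather
// than risking under-paying closing fees.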
2618 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2619 assert!(self.context.pending_inbound_htlcs.is_empty());
2620 assert!(self.context.pending_outbound_htlcs.is_empty());
2621 assert!(self.context.pending_update_fee.is_none());
2623 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2624 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2625 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2627 if value_to_holder < 0 {
2628 assert!(self.context.is_outbound());
2629 total_fee_satoshis += (-value_to_holder) as u64;
2630 } else if value_to_counterparty < 0 {
2631 assert!(!self.context.is_outbound());
2632 total_fee_satoshis += (-value_to_counterparty) as u64;
2635 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2636 value_to_counterparty = 0;
2639 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2640 value_to_holder = 0;
2643 assert!(self.context.shutdown_scriptpubkey.is_some());
2644 let holder_shutdown_script = self.get_closing_scriptpubkey();
2645 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2646 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2648 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2649 (closing_transaction, total_fee_satoshis)
2650 }
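// Worked example (illustrative): on a 100_000 sat channel where the holder is the funder
// with 60_000 sats to self and proposes a 1_000 sat fee, value_to_holder is 59_000 sats
// and value_to_counterparty is 40_000 sats. Any output at or below
// holder_dust_limit_satoshis is zeroed out above and omitted from the transaction, its
// value implicitly left to fees on-chain.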
2652 fn funding_outpoint(&self) -> OutPoint {
2653 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2656 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2657 /// entirely.
2659 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2660 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2662 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2663 /// disconnected).
2664 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2665 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2666 where L::Target: Logger {
2667 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2668 // (see equivalent if condition there).
2669 assert!(self.context.channel_state.should_force_holding_cell());
2670 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2671 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2672 self.context.latest_monitor_update_id = mon_update_id;
2673 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2674 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2678 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2679 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2680 // caller thought we could have something claimed (because we wouldn't have accepted an
2681 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2682 // so the panic below is acceptable.
2683 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2684 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2687 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2688 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2689 // these, but for now we just have to treat them as normal.
2691 let mut pending_idx = core::usize::MAX;
2692 let mut htlc_value_msat = 0;
2693 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2694 if htlc.htlc_id == htlc_id_arg {
2695 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2696 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2697 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2698 match htlc.state {
2699 InboundHTLCState::Committed => {},
2700 InboundHTLCState::LocalRemoved(ref reason) => {
2701 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2702 } else {
2703 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2704 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2705 }
2706 return UpdateFulfillFetch::DuplicateClaim {};
2707 },
2708 _ => {
2709 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2710 // Don't return in release mode here so that we can update channel_monitor
2711 }
2712 }
2713 pending_idx = idx;
2714 htlc_value_msat = htlc.amount_msat;
2715 break;
2718 if pending_idx == core::usize::MAX {
2719 #[cfg(any(test, fuzzing))]
2720 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2721 // this is simply a duplicate claim, not previously failed and we lost funds.
2722 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2723 return UpdateFulfillFetch::DuplicateClaim {};
2726 // Now update local state:
2728 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2729 // can claim it even if the channel hits the chain before we see their next commitment.
2730 self.context.latest_monitor_update_id += 1;
2731 let monitor_update = ChannelMonitorUpdate {
2732 update_id: self.context.latest_monitor_update_id,
2733 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2734 payment_preimage: payment_preimage_arg.clone(),
2738 if self.context.channel_state.should_force_holding_cell() {
2739 // Note that this condition is the same as the assertion in
2740 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2741 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2742 // do not get into this branch.
2743 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2744 match pending_update {
2745 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2746 if htlc_id_arg == htlc_id {
2747 // Make sure we don't leave latest_monitor_update_id incremented here:
2748 self.context.latest_monitor_update_id -= 1;
2749 #[cfg(any(test, fuzzing))]
2750 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2751 return UpdateFulfillFetch::DuplicateClaim {};
2754 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2755 if htlc_id_arg == htlc_id {
2756 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2757 // TODO: We may actually be able to switch to a fulfill here, though it's
2758 // rare enough it may not be worth the complexity burden.
2759 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2760 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2766 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2767 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2768 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2770 #[cfg(any(test, fuzzing))]
2771 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2772 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2774 #[cfg(any(test, fuzzing))]
2775 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2778 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2779 if let InboundHTLCState::Committed = htlc.state {
2781 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2782 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2784 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2785 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2788 UpdateFulfillFetch::NewClaim {
2789 monitor_update,
2790 htlc_value_msat,
2791 msg: Some(msgs::UpdateFulfillHTLC {
2792 channel_id: self.context.channel_id(),
2793 htlc_id: htlc_id_arg,
2794 payment_preimage: payment_preimage_arg,
2799 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2800 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2801 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2802 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2803 // Even if we aren't supposed to let new monitor updates with commitment state
2804 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2805 // matter what. Sadly, to push a new monitor update which flies before others
2806 // already queued, we have to insert it into the pending queue and update the
2807 // update_ids of all the following monitors.
2808 if release_cs_monitor && msg.is_some() {
2809 let mut additional_update = self.build_commitment_no_status_check(logger);
2810 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2811 // to be strictly increasing by one, so decrement it here.
2812 self.context.latest_monitor_update_id = monitor_update.update_id;
2813 monitor_update.updates.append(&mut additional_update.updates);
2814 } else {
2815 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2816 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2817 monitor_update.update_id = new_mon_id;
2818 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2819 held_update.update.update_id += 1;
2820 }
2821 if msg.is_some() {
2822 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2823 let update = self.build_commitment_no_status_check(logger);
2824 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2825 update,
2826 });
2827 }
2828 }
2830 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2831 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2833 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2837 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2838 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2839 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2840 /// before we fail backwards.
2842 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2843 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2844 /// [`ChannelError::Ignore`].
2845 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2846 -> Result<(), ChannelError> where L::Target: Logger {
2847 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2848 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2851 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2852 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2853 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2854 /// before we fail backwards.
2856 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2857 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2858 /// [`ChannelError::Ignore`].
2859 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2860 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2861 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2862 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2865 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2866 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2867 // these, but for now we just have to treat them as normal.
2869 let mut pending_idx = core::usize::MAX;
2870 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2871 if htlc.htlc_id == htlc_id_arg {
2872 match htlc.state {
2873 InboundHTLCState::Committed => {},
2874 InboundHTLCState::LocalRemoved(ref reason) => {
2875 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2876 } else {
2877 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2878 }
2879 return Ok(None);
2880 },
2881 _ => {
2882 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2883 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2884 }
2885 }
2886 pending_idx = idx;
2889 if pending_idx == core::usize::MAX {
2890 #[cfg(any(test, fuzzing))]
2891 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2892 // is simply a duplicate fail, not previously failed and we failed-back too early.
2893 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2894 return Ok(None);
2895 }
2897 if self.context.channel_state.should_force_holding_cell() {
2898 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2899 force_holding_cell = true;
2902 // Now update local state:
2903 if force_holding_cell {
2904 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2905 match pending_update {
2906 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2907 if htlc_id_arg == htlc_id {
2908 #[cfg(any(test, fuzzing))]
2909 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2910 return Ok(None);
2911 }
2912 },
2913 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2914 if htlc_id_arg == htlc_id {
2915 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2916 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2922 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2923 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2924 htlc_id: htlc_id_arg,
2925 err_packet,
2926 });
2927 return Ok(None);
2928 }
2930 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2932 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2933 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2936 Ok(Some(msgs::UpdateFailHTLC {
2937 channel_id: self.context.channel_id(),
2938 htlc_id: htlc_id_arg,
2939 reason: err_packet,
2940 }))
2941 }
2943 // Message handlers:
2944 /// Updates the state of the channel to indicate that all channels in the batch have received
2945 /// funding_signed and persisted their monitors.
2946 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2947 /// treated as a non-batch channel going forward.
2948 pub fn set_batch_ready(&mut self) {
2949 self.context.is_batch_funding = None;
2950 self.context.channel_state.clear_waiting_for_batch();
2953 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2954 /// and the channel is now usable (and public), this may generate an announcement_signatures to
2955 /// reply with.
2956 pub fn channel_ready<NS: Deref, L: Deref>(
2957 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2958 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2959 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2960 where
2961 NS::Target: NodeSigner,
2962 L::Target: Logger
2963 {
2964 if self.context.channel_state.is_peer_disconnected() {
2965 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2966 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2969 if let Some(scid_alias) = msg.short_channel_id_alias {
2970 if Some(scid_alias) != self.context.short_channel_id {
2971 // The scid alias provided can be used to route payments *from* our counterparty,
2972 // i.e. can be used for inbound payments and provided in invoices, but is not used
2973 // when routing outbound payments.
2974 self.context.latest_inbound_scid_alias = Some(scid_alias);
2978 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2979 // batch, but we can receive channel_ready messages.
2980 let mut check_reconnection = false;
2981 match &self.context.channel_state {
2982 ChannelState::AwaitingChannelReady(flags) => {
2983 let flags = *flags & !FundedStateFlags::ALL;
2984 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
2985 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
2986 // If we reconnected before sending our `channel_ready` they may still resend theirs.
2987 check_reconnection = true;
2988 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
2989 self.context.channel_state.set_their_channel_ready();
2990 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
2991 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
2992 self.context.update_time_counter += 1;
2994 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
2995 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
2998 // If we reconnected before sending our `channel_ready` they may still resend theirs.
2999 ChannelState::ChannelReady(_) => check_reconnection = true,
3000 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3002 if check_reconnection {
3003 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3004 // required, or they're sending a fresh SCID alias.
3005 let expected_point =
3006 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3007 // If they haven't ever sent an updated point, the point they send should match
3008 // the current one.
3009 self.context.counterparty_cur_commitment_point
3010 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3011 // If we've advanced the commitment number once, the second commitment point is
3012 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3013 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3014 self.context.counterparty_prev_commitment_point
3016 // If they have sent updated points, channel_ready is always supposed to match
3017 // their "first" point, which we re-derive here.
3018 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3019 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3020 ).expect("We already advanced, so previous secret keys should have been validated already")))
3022 if expected_point != Some(msg.next_per_commitment_point) {
3023 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3028 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3029 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3031 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3033 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3036 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3037 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3038 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3039 ) -> Result<(), ChannelError>
3040 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3041 FE::Target: FeeEstimator, L::Target: Logger,
3043 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3044 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3046 // We can't accept HTLCs sent after we've sent a shutdown.
3047 if self.context.channel_state.is_local_shutdown_sent() {
3048 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3050 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3051 if self.context.channel_state.is_remote_shutdown_sent() {
3052 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3054 if self.context.channel_state.is_peer_disconnected() {
3055 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3057 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3058 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3060 if msg.amount_msat == 0 {
3061 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3063 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3064 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3067 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3068 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3069 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3070 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3072 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3073 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3076 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3077 // the reserve_satoshis we told them to always have as direct payment so that they lose
3078 // something if we punish them for broadcasting an old state).
3079 // Note that we don't really care about having a small/no to_remote output in our local
3080 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3081 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3082 // present in the next commitment transaction we send them (at least for fulfilled ones,
3083 // failed ones won't modify value_to_self).
3084 // Note that we will send HTLCs which another instance of rust-lightning would think
3085 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3086 // Channel state once they will not be present in the next received commitment
3087 // transaction).
3088 let mut removed_outbound_total_msat = 0;
3089 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3090 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3091 removed_outbound_total_msat += htlc.amount_msat;
3092 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3093 removed_outbound_total_msat += htlc.amount_msat;
3097 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3098 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3099 (0, 0)
3100 } else {
3101 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3102 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3103 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3104 };
3105 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3106 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3107 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3108 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3109 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3110 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3111 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3115 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3116 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3117 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3118 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3119 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3120 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3121 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
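// Worked example for the two checks above (illustrative, non-anchor numbers): at a dust
// buffer feerate of 2_500 s/kW and the 703-weight HTLC-success transaction,
// htlc_success_dust_limit is 2_500 * 703 / 1000 = 1_757 sats; with a 546 sat
// holder_dust_limit_satoshis, any HTLC under 2_303 sats counts towards our on-holder-tx
// dust exposure and may be failed with 0x1000|7 once the configured cap is exceeded.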
3125 let pending_value_to_self_msat =
3126 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3127 let pending_remote_value_msat =
3128 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3129 if pending_remote_value_msat < msg.amount_msat {
3130 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3133 // Check that the remote can afford to pay for this HTLC on-chain at the current
3134 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3136 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3137 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3138 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3140 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3141 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3142 } else {
3143 0
3144 };
3145 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3146 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3148 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3149 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3153 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3154 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3155 } else {
3156 0
3157 };
3158 if !self.context.is_outbound() {
3159 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3160 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3161 // side, only on the sender's. Note that with anchor outputs we are no longer as
3162 // sensitive to fee spikes, so we don't apply the fee spike buffer multiple in that case.
3163 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3164 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3165 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3166 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3168 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3169 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3170 // the HTLC, i.e. its status is already set to failing.
3171 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3172 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3175 // Check that they won't violate our local required channel reserve by adding this HTLC.
3176 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3177 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3178 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3179 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3182 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3183 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3185 if msg.cltv_expiry >= 500000000 {
3186 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3189 if self.context.channel_state.is_local_shutdown_sent() {
3190 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3191 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3195 // Now update local state:
3196 self.context.next_counterparty_htlc_id += 1;
3197 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3198 htlc_id: msg.htlc_id,
3199 amount_msat: msg.amount_msat,
3200 payment_hash: msg.payment_hash,
3201 cltv_expiry: msg.cltv_expiry,
3202 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3203 });
3204 Ok(())
3205 }
3207 /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
3209 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3210 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3211 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3212 if htlc.htlc_id == htlc_id {
3213 let outcome = match check_preimage {
3214 None => fail_reason.into(),
3215 Some(payment_preimage) => {
3216 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3217 if payment_hash != htlc.payment_hash {
3218 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3220 OutboundHTLCOutcome::Success(Some(payment_preimage))
3221 }
3222 };
3223 match htlc.state {
3224 OutboundHTLCState::LocalAnnounced(_) =>
3225 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3226 OutboundHTLCState::Committed => {
3227 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3228 },
3229 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3230 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3231 }
3232 return Ok(htlc);
3233 }
3234 }
3235 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3238 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3239 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3240 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3242 if self.context.channel_state.is_peer_disconnected() {
3243 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3246 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3249 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3250 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3251 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3253 if self.context.channel_state.is_peer_disconnected() {
3254 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3257 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3258 Ok(())
3259 }
3261 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3262 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3263 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3265 if self.context.channel_state.is_peer_disconnected() {
3266 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3269 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3270 Ok(())
3271 }
3273 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3274 where L::Target: Logger
3276 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3277 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3279 if self.context.channel_state.is_peer_disconnected() {
3280 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3282 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3283 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3286 let funding_script = self.context.get_funding_redeemscript();
3288 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3290 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3291 let commitment_txid = {
3292 let trusted_tx = commitment_stats.tx.trust();
3293 let bitcoin_tx = trusted_tx.built_transaction();
3294 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3296 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3297 log_bytes!(msg.signature.serialize_compact()[..]),
3298 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3299 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3300 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3301 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3305 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3307 // If our counterparty updated the channel fee in this commitment transaction, check that
3308 // they can actually afford the new fee now.
3309 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3310 update_state == FeeUpdateState::RemoteAnnounced
3311 } else { false };
3312 if update_fee {
3313 debug_assert!(!self.context.is_outbound());
3314 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3315 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3316 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3319 #[cfg(any(test, fuzzing))]
3321 if self.context.is_outbound() {
3322 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3323 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3324 if let Some(info) = projected_commit_tx_info {
3325 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3326 + self.context.holding_cell_htlc_updates.len();
3327 if info.total_pending_htlcs == total_pending_htlcs
3328 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3329 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3330 && info.feerate == self.context.feerate_per_kw {
3331 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3337 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3338 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3341 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3342 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3343 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3344 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3345 // backwards compatibility, we never use it in production. To provide test coverage, here,
3346 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3347 #[allow(unused_assignments, unused_mut)]
3348 let mut separate_nondust_htlc_sources = false;
3349 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3350 use core::hash::{BuildHasher, Hasher};
3351 // Get a random value using the only std API to do so - the DefaultHasher
3352 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3353 separate_nondust_htlc_sources = rand_val % 2 == 0;
3356 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3357 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3358 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3359 if let Some(_) = htlc.transaction_output_index {
3360 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3361 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3362 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3364 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3365 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3366 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3367 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3368 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3369 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3370 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3371 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3373 if !separate_nondust_htlc_sources {
3374 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3375 }
3376 } else {
3377 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3378 }
3379 if separate_nondust_htlc_sources {
3380 if let Some(source) = source_opt.take() {
3381 nondust_htlc_sources.push(source);
3384 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3387 let holder_commitment_tx = HolderCommitmentTransaction::new(
3388 commitment_stats.tx,
3389 msg.signature,
3390 msg.htlc_signatures.clone(),
3391 &self.context.get_holder_pubkeys().funding_pubkey,
3392 self.context.counterparty_funding_pubkey()
3395 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3396 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3398 // Update state now that we've passed all the can-fail calls...
3399 let mut need_commitment = false;
3400 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3401 if *update_state == FeeUpdateState::RemoteAnnounced {
3402 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3403 need_commitment = true;
3407 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3408 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3409 Some(forward_info.clone())
3410 } else { None };
3411 if let Some(forward_info) = new_forward {
3412 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3413 &htlc.payment_hash, &self.context.channel_id);
3414 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3415 need_commitment = true;
3418 let mut claimed_htlcs = Vec::new();
3419 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3420 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3421 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3422 &htlc.payment_hash, &self.context.channel_id);
3423 // Grab the preimage, if it exists, instead of cloning
3424 let mut reason = OutboundHTLCOutcome::Success(None);
3425 mem::swap(outcome, &mut reason);
3426 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3427 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3428 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3429 // have a `Success(None)` reason. In this case we could forget some HTLC
3430 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3431 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3432 // claim process.
3433 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3435 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3436 need_commitment = true;
3440 self.context.latest_monitor_update_id += 1;
3441 let mut monitor_update = ChannelMonitorUpdate {
3442 update_id: self.context.latest_monitor_update_id,
3443 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3444 commitment_tx: holder_commitment_tx,
3445 htlc_outputs: htlcs_and_sigs,
3446 claimed_htlcs,
3447 nondust_htlc_sources,
3448 }],
3449 };
3451 self.context.cur_holder_commitment_transaction_number -= 1;
3452 self.context.expecting_peer_commitment_signed = false;
3453 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3454 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3455 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3457 if self.context.channel_state.is_monitor_update_in_progress() {
3458 // In case we initially failed monitor updating without requiring a response, we need
3459 // to make sure the RAA gets sent first.
3460 self.context.monitor_pending_revoke_and_ack = true;
3461 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3462 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3463 // the corresponding HTLC status updates so that
3464 // get_last_commitment_update_for_send includes the right HTLCs.
3465 self.context.monitor_pending_commitment_signed = true;
3466 let mut additional_update = self.build_commitment_no_status_check(logger);
3467 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3468 // strictly increasing by one, so decrement it here.
3469 self.context.latest_monitor_update_id = monitor_update.update_id;
3470 monitor_update.updates.append(&mut additional_update.updates);
3472 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3473 &self.context.channel_id);
3474 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3477 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3478 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3479 // we'll send one right away when we get the revoke_and_ack when we
3480 // free_holding_cell_htlcs().
3481 let mut additional_update = self.build_commitment_no_status_check(logger);
3482 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3483 // strictly increasing by one, so decrement it here.
3484 self.context.latest_monitor_update_id = monitor_update.update_id;
3485 monitor_update.updates.append(&mut additional_update.updates);
3486 true
3487 } else { false };
3489 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3490 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3491 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3492 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3495 /// Public version of the below, checking relevant preconditions first.
3496 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3497 /// returns `(None, Vec::new())`.
3498 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3499 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3500 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3501 where F::Target: FeeEstimator, L::Target: Logger
3503 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3504 self.free_holding_cell_htlcs(fee_estimator, logger)
3505 } else { (None, Vec::new()) }
3508 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3509 /// for our counterparty.
3510 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3511 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3512 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3513 where F::Target: FeeEstimator, L::Target: Logger
3515 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3516 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3517 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3518 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3520 let mut monitor_update = ChannelMonitorUpdate {
3521 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3522 updates: Vec::new(),
3525 let mut htlc_updates = Vec::new();
3526 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3527 let mut update_add_count = 0;
3528 let mut update_fulfill_count = 0;
3529 let mut update_fail_count = 0;
3530 let mut htlcs_to_fail = Vec::new();
3531 for htlc_update in htlc_updates.drain(..) {
3532 // Note that this *can* fail, though it should be due to rather-rare conditions on
3533 // fee races with adding too many outputs which push our total payments just over
3534 // the limit. In case it's less rare than I anticipate, we may want to revisit
3535 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3536 // to rebalance channels.
3537 match &htlc_update {
3538 &HTLCUpdateAwaitingACK::AddHTLC {
3539 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3540 skimmed_fee_msat, blinding_point, ..
3542 match self.send_htlc(
3543 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3544 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3545 ) {
3546 Ok(_) => update_add_count += 1,
3547 Err(e) => {
3548 match e {
3549 ChannelError::Ignore(ref msg) => {
3550 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3551 // If we fail to send here, then this HTLC should
3552 // be failed backwards. Failing to send here
3553 // indicates that this HTLC may keep being put back
3554 // into the holding cell without ever being
3555 // successfully forwarded/failed/fulfilled, causing
3556 // our counterparty to eventually close on us.
3557 htlcs_to_fail.push((source.clone(), *payment_hash));
3560 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3566 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3567 // If an HTLC claim was previously added to the holding cell (via
3568 // `get_update_fulfill_htlc`, then generating the claim message itself must
3569 // not fail - any in between attempts to claim the HTLC will have resulted
3570 // in it hitting the holding cell again and we cannot change the state of a
3571 // holding cell HTLC from fulfill to anything else.
3572 let mut additional_monitor_update =
3573 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3574 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3575 { monitor_update } else { unreachable!() };
3576 update_fulfill_count += 1;
3577 monitor_update.updates.append(&mut additional_monitor_update.updates);
3579 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3580 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3581 Ok(update_fail_msg_option) => {
3582 // If an HTLC failure was previously added to the holding cell (via
3583 // `queue_fail_htlc`) then generating the fail message itself must
3584 // not fail - we should never end up in a state where we double-fail
3585 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3586 // for a full revocation before failing.
3587 debug_assert!(update_fail_msg_option.is_some());
3588 update_fail_count += 1;
3589 },
3590 Err(e) => {
3591 if let ChannelError::Ignore(_) = e {}
3592 else {
3593 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3600 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3601 return (None, htlcs_to_fail);
3603 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3604 self.send_update_fee(feerate, false, fee_estimator, logger)
3605 } else {
3606 None
3607 };
3609 let mut additional_update = self.build_commitment_no_status_check(logger);
3610 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3611 // but we want them to be strictly increasing by one, so reset it here.
3612 self.context.latest_monitor_update_id = monitor_update.update_id;
3613 monitor_update.updates.append(&mut additional_update.updates);
3615 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3616 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3617 update_add_count, update_fulfill_count, update_fail_count);
3619 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3620 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3621 } else {
3622 (None, Vec::new())
3623 }
3624 }
3626 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3627 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3628 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3629 /// generating an appropriate error *after* the channel state has been updated based on the
3630 /// revoke_and_ack message.
3631 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3632 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3633 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3634 where F::Target: FeeEstimator, L::Target: Logger,
3636 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3637 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3639 if self.context.channel_state.is_peer_disconnected() {
3640 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3642 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3643 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3646 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3648 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3649 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3650 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
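// A minimal standalone sketch of why the check above works (secp256k1 is
// already imported at the top of this file): each per-commitment point is just
// the public key of its per-commitment secret, so a revealed revocation secret
// can be verified against the point the peer advertised earlier:
//
//     use bitcoin::secp256k1::{Secp256k1, SecretKey, PublicKey};
//     let secp = Secp256k1::new();
//     let revealed = SecretKey::from_slice(&msg.per_commitment_secret).unwrap();
//     let derived = PublicKey::from_secret_key(&secp, &revealed);
//     // `derived` must equal the counterparty's previously-advertised point.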
3654 if !self.context.channel_state.is_awaiting_remote_revoke() {
3655 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3656 // haven't given them a new commitment transaction to broadcast). We should probably
3657 // take advantage of this by updating our channel monitor, sending them an error, and
3658 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3659 // lot of work, and there's some chance this is all a misunderstanding anyway.
3660 // We have to do *something*, though, since our signer may get mad at us for otherwise
3661 // jumping a remote commitment number, so best to just force-close and move on.
3662 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3665 #[cfg(any(test, fuzzing))]
3667 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3668 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3671 match &self.context.holder_signer {
3672 ChannelSignerType::Ecdsa(ecdsa) => {
3673 ecdsa.validate_counterparty_revocation(
3674 self.context.cur_counterparty_commitment_transaction_number + 1,
3676 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3678 // TODO (taproot|arik)
3683 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3684 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3685 self.context.latest_monitor_update_id += 1;
3686 let mut monitor_update = ChannelMonitorUpdate {
3687 update_id: self.context.latest_monitor_update_id,
3688 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3689 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3690 secret: msg.per_commitment_secret,
3694 // Update state now that we've passed all the can-fail calls...
3695 // (note that we may still fail to generate the new commitment_signed message, but that's
3696 // OK, we step the channel here and *then* if the new generation fails we can fail the
3697 // channel based on that, but stepping stuff here should be safe either way.)
3698 self.context.channel_state.clear_awaiting_remote_revoke();
3699 self.context.sent_message_awaiting_response = None;
3700 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3701 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3702 self.context.cur_counterparty_commitment_transaction_number -= 1;
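// Hedged worked example: these numbers count *down* from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1). After the decrement above, `cur` is the
// index of the next commitment the peer will sign for, and `cur + 2` is the
// state whose secret they just revealed (it was stored via provide_secret as
// `cur + 1` before the decrement).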
3704 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3705 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3708 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3709 let mut to_forward_infos = Vec::new();
3710 let mut revoked_htlcs = Vec::new();
3711 let mut finalized_claimed_htlcs = Vec::new();
3712 let mut update_fail_htlcs = Vec::new();
3713 let mut update_fail_malformed_htlcs = Vec::new();
3714 let mut require_commitment = false;
3715 let mut value_to_self_msat_diff: i64 = 0;
3718 // Take references explicitly so that we can hold multiple references to self.context.
3719 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3720 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3721 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3723 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3724 pending_inbound_htlcs.retain(|htlc| {
3725 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3726 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3727 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3728 value_to_self_msat_diff += htlc.amount_msat as i64;
3730 *expecting_peer_commitment_signed = true;
3734 pending_outbound_htlcs.retain(|htlc| {
3735 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3736 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3737 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3738 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3740 finalized_claimed_htlcs.push(htlc.source.clone());
3741 // They fulfilled, so we sent them money
3742 value_to_self_msat_diff -= htlc.amount_msat as i64;
3747 for htlc in pending_inbound_htlcs.iter_mut() {
3748 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3750 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3754 let mut state = InboundHTLCState::Committed;
3755 mem::swap(&mut state, &mut htlc.state);
3757 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3758 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3759 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3760 require_commitment = true;
3761 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3762 match forward_info {
3763 PendingHTLCStatus::Fail(fail_msg) => {
3764 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3765 require_commitment = true;
3767 HTLCFailureMsg::Relay(msg) => {
3768 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3769 update_fail_htlcs.push(msg)
3771 HTLCFailureMsg::Malformed(msg) => {
3772 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3773 update_fail_malformed_htlcs.push(msg)
3777 PendingHTLCStatus::Forward(forward_info) => {
3778 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3779 to_forward_infos.push((forward_info, htlc.htlc_id));
3780 htlc.state = InboundHTLCState::Committed;
3786 for htlc in pending_outbound_htlcs.iter_mut() {
3787 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3788 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3789 htlc.state = OutboundHTLCState::Committed;
3790 *expecting_peer_commitment_signed = true;
3792 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3793 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3794 // Grab the preimage, if it exists, instead of cloning
3795 let mut reason = OutboundHTLCOutcome::Success(None);
3796 mem::swap(outcome, &mut reason);
3797 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3798 require_commitment = true;
3802 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
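// Hedged arithmetic example: if we just irrevocably claimed an inbound
// 10_000 msat HTLC while the peer claimed an outbound 4_000 msat one,
// value_to_self_msat_diff = 10_000 - 4_000 = 6_000, moving a 50_000 msat
// balance to 56_000. The signed i64 intermediate lets the diff go negative
// without underflowing before it is applied to the u64 balance.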
3804 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3805 match update_state {
3806 FeeUpdateState::Outbound => {
3807 debug_assert!(self.context.is_outbound());
3808 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3809 self.context.feerate_per_kw = feerate;
3810 self.context.pending_update_fee = None;
3811 self.context.expecting_peer_commitment_signed = true;
3813 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3814 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3815 debug_assert!(!self.context.is_outbound());
3816 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3817 require_commitment = true;
3818 self.context.feerate_per_kw = feerate;
3819 self.context.pending_update_fee = None;
3824 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3825 let release_state_str =
3826 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3827 macro_rules! return_with_htlcs_to_fail {
3828 ($htlcs_to_fail: expr) => {
3829 if !release_monitor {
3830 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3831 update: monitor_update,
3833 return Ok(($htlcs_to_fail, None));
3835 return Ok(($htlcs_to_fail, Some(monitor_update)));
3840 if self.context.channel_state.is_monitor_update_in_progress() {
3841 // We can't actually generate a new commitment transaction (including by freeing holding
3842 // cells) while we can't update the monitor, so we just return what we have.
3843 if require_commitment {
3844 self.context.monitor_pending_commitment_signed = true;
3845 // When the monitor updating is restored we'll call
3846 // get_last_commitment_update_for_send(), which does not update state, but we're
3847 // definitely now awaiting a remote revoke before we can step forward any more, so
3849 let mut additional_update = self.build_commitment_no_status_check(logger);
3850 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3851 // strictly increasing by one, so decrement it here.
3852 self.context.latest_monitor_update_id = monitor_update.update_id;
3853 monitor_update.updates.append(&mut additional_update.updates);
3855 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3856 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3857 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3858 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3859 return_with_htlcs_to_fail!(Vec::new());
3862 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3863 (Some(mut additional_update), htlcs_to_fail) => {
3864 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3865 // strictly increasing by one, so decrement it here.
3866 self.context.latest_monitor_update_id = monitor_update.update_id;
3867 monitor_update.updates.append(&mut additional_update.updates);
3869 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3870 &self.context.channel_id(), release_state_str);
3872 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3873 return_with_htlcs_to_fail!(htlcs_to_fail);
3875 (None, htlcs_to_fail) => {
3876 if require_commitment {
3877 let mut additional_update = self.build_commitment_no_status_check(logger);
3879 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3880 // strictly increasing by one, so decrement it here.
3881 self.context.latest_monitor_update_id = monitor_update.update_id;
3882 monitor_update.updates.append(&mut additional_update.updates);
3884 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3885 &self.context.channel_id(),
3886 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3889 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3890 return_with_htlcs_to_fail!(htlcs_to_fail);
3892 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3893 &self.context.channel_id(), release_state_str);
3895 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3896 return_with_htlcs_to_fail!(htlcs_to_fail);
3902 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3903 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3904 /// commitment update.
3905 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3906 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3907 where F::Target: FeeEstimator, L::Target: Logger
3909 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3910 assert!(msg_opt.is_none(), "We forced holding cell?");
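// Hedged caller-side sketch (argument and return types elided; see the two
// methods' docs above for the real signatures):
//
//     chan.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
//     // ...later, once any pending remote revoke has come in:
//     chan.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);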
3913 /// Adds a pending update to this channel. See the doc for send_htlc for
3914 /// further details on the optionality of the return value.
3915 /// If our balance is too low to cover the cost of the next commitment transaction at the
3916 /// new feerate, the update is cancelled.
3918 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3919 /// [`Channel`] if `force_holding_cell` is false.
3920 fn send_update_fee<F: Deref, L: Deref>(
3921 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3922 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3923 ) -> Option<msgs::UpdateFee>
3924 where F::Target: FeeEstimator, L::Target: Logger
3926 if !self.context.is_outbound() {
3927 panic!("Cannot send fee from inbound channel");
3929 if !self.context.is_usable() {
3930 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3932 if !self.context.is_live() {
3933 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3936 // Before proposing a feerate update, check that we can actually afford the new fee.
3937 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3938 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3939 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3940 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3941 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3942 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3943 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3944 //TODO: auto-close after a number of failures?
3945 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
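// Hedged worked example using the BOLT 3 non-anchor commitment weights (724
// weight units base + 172 per non-dust HTLC): at feerate_per_kw = 2_500 with
// two non-dust HTLCs, one holding-cell HTLC, and a hypothetical two-HTLC
// concurrency buffer,
//
//     buffer_fee_msat = 2_500 * (724 + 5 * 172) / 1_000 * 1_000
//                     = 3_960 sat * 1_000 = 3_960_000 msat
//
// which (plus the counterparty-selected reserve) must fit within our current
// balance for the update_fee to be sent rather than cancelled.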
3949 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3950 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3951 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3952 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3953 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3954 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3957 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3958 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3962 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
3963 force_holding_cell = true;
3966 if force_holding_cell {
3967 self.context.holding_cell_update_fee = Some(feerate_per_kw);
3971 debug_assert!(self.context.pending_update_fee.is_none());
3972 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3974 Some(msgs::UpdateFee {
3975 channel_id: self.context.channel_id,
3980 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3981 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be re-sent.
3983 /// No further message handling calls may be made until a channel_reestablish dance has completed.
3985 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3986 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3987 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
3988 if self.context.channel_state.is_pre_funded_state() {
3992 if self.context.channel_state.is_peer_disconnected() {
3993 // While the below code should be idempotent, it's simpler to just return early, as
3994 // redundant disconnect events can fire, though they should be rare.
3998 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3999 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4002 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4003 // will be retransmitted.
4004 self.context.last_sent_closing_fee = None;
4005 self.context.pending_counterparty_closing_signed = None;
4006 self.context.closing_fee_limits = None;
4008 let mut inbound_drop_count = 0;
4009 self.context.pending_inbound_htlcs.retain(|htlc| {
4011 InboundHTLCState::RemoteAnnounced(_) => {
4012 // They sent us an update_add_htlc but we never got the commitment_signed.
4013 // We'll tell them what commitment_signed we're expecting next and they'll drop
4014 // this HTLC accordingly
4015 inbound_drop_count += 1;
4018 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4019 // We received a commitment_signed updating this HTLC and (at least hopefully)
4020 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4021 // in response to it yet, so don't touch it.
4024 InboundHTLCState::Committed => true,
4025 InboundHTLCState::LocalRemoved(_) => {
4026 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4027 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4028 // (that we missed). Keep this around for now and if they tell us they missed
4029 // the commitment_signed we can re-transmit the update then.
4034 self.context.next_counterparty_htlc_id -= inbound_drop_count;
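// E.g. (hedged): if the peer had sent update_add_htlc for ids 5 and 6 but
// never followed up with commitment_signed, both are dropped above and
// next_counterparty_htlc_id rewinds from 7 to 5, so the adds they re-send
// after reconnection reuse the same ids.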
4036 if let Some((_, update_state)) = self.context.pending_update_fee {
4037 if update_state == FeeUpdateState::RemoteAnnounced {
4038 debug_assert!(!self.context.is_outbound());
4039 self.context.pending_update_fee = None;
4043 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4044 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4045 // They sent us an update to remove this but haven't yet sent the corresponding
4046 // commitment_signed, we need to move it back to Committed and they can re-send
4047 // the update upon reconnection.
4048 htlc.state = OutboundHTLCState::Committed;
4052 self.context.sent_message_awaiting_response = None;
4054 self.context.channel_state.set_peer_disconnected();
4055 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4059 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4060 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4061 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4062 /// update completes (potentially immediately).
4063 /// The messages which were generated with the monitor update must *not* have been sent to the
4064 /// remote end, and must instead have been dropped. They will be regenerated when
4065 /// [`Self::monitor_updating_restored`] is called.
4067 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4068 /// [`chain::Watch`]: crate::chain::Watch
4069 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4070 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4071 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4072 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4073 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4075 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4076 self.context.monitor_pending_commitment_signed |= resend_commitment;
4077 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4078 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4079 self.context.monitor_pending_failures.append(&mut pending_fails);
4080 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4081 self.context.channel_state.set_monitor_update_in_progress();
4084 /// Indicates that the latest ChannelMonitor update has been committed by the client
4085 /// successfully and we should restore normal operation. Returns messages which should be sent
4086 /// to the remote side.
4087 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4088 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4089 user_config: &UserConfig, best_block_height: u32
4090 ) -> MonitorRestoreUpdates
4093 NS::Target: NodeSigner
4095 assert!(self.context.channel_state.is_monitor_update_in_progress());
4096 self.context.channel_state.clear_monitor_update_in_progress();
4098 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4099 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4100 // first received the funding_signed.
4101 let mut funding_broadcastable =
4102 if self.context.is_outbound() &&
4103 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4104 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4106 self.context.funding_transaction.take()
4108 // That said, if the funding transaction is already confirmed (ie we're active with a
4109 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4110 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4111 funding_broadcastable = None;
4114 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4115 // (and we assume the user never directly broadcasts the funding transaction and waits for
4116 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4117 // * an inbound channel that failed to persist the monitor on funding_created and we got
4118 // the funding transaction confirmed before the monitor was persisted, or
4119 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
4120 let channel_ready = if self.context.monitor_pending_channel_ready {
4121 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4122 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4123 self.context.monitor_pending_channel_ready = false;
4124 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4125 Some(msgs::ChannelReady {
4126 channel_id: self.context.channel_id(),
4127 next_per_commitment_point,
4128 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4132 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4134 let mut accepted_htlcs = Vec::new();
4135 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4136 let mut failed_htlcs = Vec::new();
4137 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4138 let mut finalized_claimed_htlcs = Vec::new();
4139 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4141 if self.context.channel_state.is_peer_disconnected() {
4142 self.context.monitor_pending_revoke_and_ack = false;
4143 self.context.monitor_pending_commitment_signed = false;
4144 return MonitorRestoreUpdates {
4145 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4146 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4150 let raa = if self.context.monitor_pending_revoke_and_ack {
4151 Some(self.get_last_revoke_and_ack())
4153 let commitment_update = if self.context.monitor_pending_commitment_signed {
4154 self.get_last_commitment_update_for_send(logger).ok()
4156 if commitment_update.is_some() {
4157 self.mark_awaiting_response();
4160 self.context.monitor_pending_revoke_and_ack = false;
4161 self.context.monitor_pending_commitment_signed = false;
4162 let order = self.context.resend_order.clone();
4163 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4164 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4165 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4166 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4167 MonitorRestoreUpdates {
4168 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4172 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4173 where F::Target: FeeEstimator, L::Target: Logger
4175 if self.context.is_outbound() {
4176 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4178 if self.context.channel_state.is_peer_disconnected() {
4179 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4181 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4183 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4184 self.context.update_time_counter += 1;
4185 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4186 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4187 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4188 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4189 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4190 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4191 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4192 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4193 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4194 msg.feerate_per_kw, holder_tx_dust_exposure)));
4196 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4197 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4198 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4204 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4207 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4208 let commitment_update = if self.context.signer_pending_commitment_update {
4209 self.get_last_commitment_update_for_send(logger).ok()
4211 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4212 self.context.get_funding_signed_msg(logger).1
4214 let channel_ready = if funding_signed.is_some() {
4215 self.check_get_channel_ready(0)
4218 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4219 if commitment_update.is_some() { "a" } else { "no" },
4220 if funding_signed.is_some() { "a" } else { "no" },
4221 if channel_ready.is_some() { "a" } else { "no" });
4223 SignerResumeUpdates {
4230 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4231 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4232 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
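// Hedged index example: holder commitment numbers count down, so with
// cur_holder_commitment_transaction_number == N, `N + 1` is the commitment we
// currently hold and `N + 2` is the one being revoked here. We therefore
// release the secret for `N + 2` and advertise the point for the upcoming
// state `N`.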
4233 msgs::RevokeAndACK {
4234 channel_id: self.context.channel_id,
4235 per_commitment_secret,
4236 next_per_commitment_point,
4238 next_local_nonce: None,
4242 /// Gets the last commitment update for immediate sending to our peer.
4243 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4244 let mut update_add_htlcs = Vec::new();
4245 let mut update_fulfill_htlcs = Vec::new();
4246 let mut update_fail_htlcs = Vec::new();
4247 let mut update_fail_malformed_htlcs = Vec::new();
4249 for htlc in self.context.pending_outbound_htlcs.iter() {
4250 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4251 update_add_htlcs.push(msgs::UpdateAddHTLC {
4252 channel_id: self.context.channel_id(),
4253 htlc_id: htlc.htlc_id,
4254 amount_msat: htlc.amount_msat,
4255 payment_hash: htlc.payment_hash,
4256 cltv_expiry: htlc.cltv_expiry,
4257 onion_routing_packet: (**onion_packet).clone(),
4258 skimmed_fee_msat: htlc.skimmed_fee_msat,
4259 blinding_point: htlc.blinding_point,
4264 for htlc in self.context.pending_inbound_htlcs.iter() {
4265 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4267 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4268 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4269 channel_id: self.context.channel_id(),
4270 htlc_id: htlc.htlc_id,
4271 reason: err_packet.clone()
4274 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4275 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4276 channel_id: self.context.channel_id(),
4277 htlc_id: htlc.htlc_id,
4278 sha256_of_onion: sha256_of_onion.clone(),
4279 failure_code: failure_code.clone(),
4282 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4283 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4284 channel_id: self.context.channel_id(),
4285 htlc_id: htlc.htlc_id,
4286 payment_preimage: payment_preimage.clone(),
4293 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4294 Some(msgs::UpdateFee {
4295 channel_id: self.context.channel_id(),
4296 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4300 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4301 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4302 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4303 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4304 if self.context.signer_pending_commitment_update {
4305 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4306 self.context.signer_pending_commitment_update = false;
4310 if !self.context.signer_pending_commitment_update {
4311 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4312 self.context.signer_pending_commitment_update = true;
4316 Ok(msgs::CommitmentUpdate {
4317 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4322 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4323 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4324 if self.context.channel_state.is_local_shutdown_sent() {
4325 assert!(self.context.shutdown_scriptpubkey.is_some());
4326 Some(msgs::Shutdown {
4327 channel_id: self.context.channel_id,
4328 scriptpubkey: self.get_closing_scriptpubkey(),
4333 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4334 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4336 /// Some links printed in log lines are included here to check them during build (when run with
4337 /// `cargo doc --document-private-items`):
4338 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4339 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4340 pub fn channel_reestablish<L: Deref, NS: Deref>(
4341 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4342 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4343 ) -> Result<ReestablishResponses, ChannelError>
4346 NS::Target: NodeSigner
4348 if !self.context.channel_state.is_peer_disconnected() {
4349 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4350 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4351 // just close here instead of trying to recover.
4352 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4355 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4356 msg.next_local_commitment_number == 0 {
4357 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4360 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
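// Hedged worked example of the mapping above: our internal numbers count down
// from INITIAL_COMMITMENT_NUMBER while channel_reestablish carries counting-up
// "next" indexes. If cur_holder_commitment_transaction_number ==
// INITIAL_COMMITMENT_NUMBER - 2, then our_commitment_transaction == 1: a peer
// sending next_remote_commitment_number == 1 is fully up to date, while == 0
// means they lost our last revoke_and_ack (handled below).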
4361 if msg.next_remote_commitment_number > 0 {
4362 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4363 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4364 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4365 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4366 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4368 if msg.next_remote_commitment_number > our_commitment_transaction {
4369 macro_rules! log_and_panic {
4370 ($err_msg: expr) => {
4371 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4372 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4375 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4376 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4377 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4378 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4379 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4380 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4381 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4382 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4386 // Before we change the state of the channel, we check if the peer is sending a very old
4387 // commitment transaction number, if yes we send a warning message.
4388 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4389 return Err(ChannelError::Warn(format!(
4390 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4391 msg.next_remote_commitment_number,
4392 our_commitment_transaction
4396 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4397 // remaining cases either succeed or ErrorMessage-fail).
4398 self.context.channel_state.clear_peer_disconnected();
4399 self.context.sent_message_awaiting_response = None;
4401 let shutdown_msg = self.get_outbound_shutdown();
4403 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4405 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4406 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4407 if !self.context.channel_state.is_our_channel_ready() ||
4408 self.context.channel_state.is_monitor_update_in_progress() {
4409 if msg.next_remote_commitment_number != 0 {
4410 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4412 // Short circuit the whole handler as there is nothing we can resend them
4413 return Ok(ReestablishResponses {
4414 channel_ready: None,
4415 raa: None, commitment_update: None,
4416 order: RAACommitmentOrder::CommitmentFirst,
4417 shutdown_msg, announcement_sigs,
4421 // We have OurChannelReady set!
4422 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4423 return Ok(ReestablishResponses {
4424 channel_ready: Some(msgs::ChannelReady {
4425 channel_id: self.context.channel_id(),
4426 next_per_commitment_point,
4427 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4429 raa: None, commitment_update: None,
4430 order: RAACommitmentOrder::CommitmentFirst,
4431 shutdown_msg, announcement_sigs,
4435 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4436 // Remote isn't waiting on any RevokeAndACK from us!
4437 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4439 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4440 if self.context.channel_state.is_monitor_update_in_progress() {
4441 self.context.monitor_pending_revoke_and_ack = true;
4444 Some(self.get_last_revoke_and_ack())
4447 debug_assert!(false, "All values should have been handled in the four cases above");
4448 return Err(ChannelError::Close(format!(
4449 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4450 msg.next_remote_commitment_number,
4451 our_commitment_transaction
4455 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4456 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4457 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4458 // the corresponding revoke_and_ack back yet.
4459 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4460 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4461 self.mark_awaiting_response();
4463 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
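// Hedged example: if cur_counterparty_commitment_transaction_number ==
// INITIAL_COMMITMENT_NUMBER - 5 and we're still awaiting their revoke_and_ack
// for a commitment_signed we already sent, then
// next_counterparty_commitment_number == 5 + 1 == 6, matching the peer's
// counting-up next_local_commitment_number.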
4465 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4466 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4467 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4468 Some(msgs::ChannelReady {
4469 channel_id: self.context.channel_id(),
4470 next_per_commitment_point,
4471 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4475 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4476 if required_revoke.is_some() {
4477 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4479 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4482 Ok(ReestablishResponses {
4483 channel_ready, shutdown_msg, announcement_sigs,
4484 raa: required_revoke,
4485 commitment_update: None,
4486 order: self.context.resend_order.clone(),
4488 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4489 if required_revoke.is_some() {
4490 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4492 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4495 if self.context.channel_state.is_monitor_update_in_progress() {
4496 self.context.monitor_pending_commitment_signed = true;
4497 Ok(ReestablishResponses {
4498 channel_ready, shutdown_msg, announcement_sigs,
4499 commitment_update: None, raa: None,
4500 order: self.context.resend_order.clone(),
4503 Ok(ReestablishResponses {
4504 channel_ready, shutdown_msg, announcement_sigs,
4505 raa: required_revoke,
4506 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4507 order: self.context.resend_order.clone(),
4510 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4511 Err(ChannelError::Close(format!(
4512 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4513 msg.next_local_commitment_number,
4514 next_counterparty_commitment_number,
4517 Err(ChannelError::Close(format!(
4518 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4519 msg.next_local_commitment_number,
4520 next_counterparty_commitment_number,
4525 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4526 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4527 /// at which point they will be recalculated.
4528 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4530 where F::Target: FeeEstimator
4532 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4534 // Propose a range from our current Background feerate to our Normal feerate plus our
4535 // force_close_avoidance_max_fee_satoshis.
4536 // If we fail to come to consensus, we'll have to force-close.
4537 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4538 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4539 // that we don't expect to need fee bumping
4540 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4541 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4543 // The spec requires that (when the channel does not have anchors) we only send absolute
4544 // channel fees no greater than the absolute channel fee on the current commitment
4545 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4546 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4547 // some force-closure by old nodes, but we wanted to close the channel anyway.
4549 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4550 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4551 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4552 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4555 // Note that technically we could end up with a lower minimum fee if one side's balance is
4556 // below our dust limit, causing the output to disappear. We don't bother handling this
4557 // case, however, as this should only happen if a channel is closed before any (material)
4558 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4559 // come to consensus with our counterparty on appropriate fees, however it should be a
4560 // relatively rare case. We can revisit this later, though note that in order to determine
4561 // if the funders' output is dust we have to know the absolute fee we're going to use.
4562 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4563 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4564 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4565 // We always add force_close_avoidance_max_fee_satoshis to our normal
4566 // feerate-calculated fee, but allow the max to be overridden if we're using a
4567 // target feerate-calculated fee.
4568 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4569 proposed_max_feerate as u64 * tx_weight / 1000)
4571 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4574 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4575 self.context.closing_fee_limits.clone().unwrap()
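// Hedged worked example (round numbers; a one-input, two-P2WPKH-output close
// weighs roughly 672 weight units): with ChannelCloseMinimum floored at
// 253 sat/kW the proposed minimum fee is 253 * 672 / 1_000 = 170 sat, and for
// the funder with NonAnchorChannelFee at 1_000 sat/kW plus the default
// force_close_avoidance_max_fee_satoshis of 1_000, the maximum is
// 1_000 * 672 / 1_000 + 1_000 = 1_672 sat.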
4578 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4579 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4580 /// this point if we're the funder we should send the initial closing_signed, and in any case
4581 /// shutdown should complete within a reasonable timeframe.
4582 fn closing_negotiation_ready(&self) -> bool {
4583 self.context.closing_negotiation_ready()
4586 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4587 /// an Err if no progress is being made and the channel should be force-closed instead.
4588 /// Should be called on a one-minute timer.
4589 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4590 if self.closing_negotiation_ready() {
4591 if self.context.closing_signed_in_flight {
4592 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4594 self.context.closing_signed_in_flight = true;
4600 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4601 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4602 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4603 where F::Target: FeeEstimator, L::Target: Logger
4605 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4606 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4607 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4608 // that closing_negotiation_ready checks this case (as well as a few others).
4609 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4610 return Ok((None, None, None));
4613 if !self.context.is_outbound() {
4614 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4615 return self.closing_signed(fee_estimator, &msg);
4617 return Ok((None, None, None));
4620 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4621 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4622 if self.context.expecting_peer_commitment_signed {
4623 return Ok((None, None, None));
4626 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4628 assert!(self.context.shutdown_scriptpubkey.is_some());
4629 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4630 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4631 our_min_fee, our_max_fee, total_fee_satoshis);
4633 match &self.context.holder_signer {
4634 ChannelSignerType::Ecdsa(ecdsa) => {
4636 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4637 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4639 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4640 Ok((Some(msgs::ClosingSigned {
4641 channel_id: self.context.channel_id,
4642 fee_satoshis: total_fee_satoshis,
4644 fee_range: Some(msgs::ClosingSignedFeeRange {
4645 min_fee_satoshis: our_min_fee,
4646 max_fee_satoshis: our_max_fee,
4650 // TODO (taproot|arik)
4656 // Marks a channel as waiting for a response from the counterparty. If it's not received
4657 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4659 fn mark_awaiting_response(&mut self) {
4660 self.context.sent_message_awaiting_response = Some(0);
4663 /// Determines whether we should disconnect the counterparty due to not receiving a response
4664 /// within our expected timeframe.
4666 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4667 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4668 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4671 // Don't disconnect when we're not waiting on a response.
4674 *ticks_elapsed += 1;
4675 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
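// E.g. (hedged, assuming DISCONNECT_PEER_AWAITING_RESPONSE_TICKS == 2): after
// mark_awaiting_response() sets sent_message_awaiting_response to Some(0), the
// second timer tick with no reply from the peer makes this return true, and
// the caller should disconnect (and later reconnect to) the peer.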
4679 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4680 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4682 if self.context.channel_state.is_peer_disconnected() {
4683 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4685 if self.context.channel_state.is_pre_funded_state() {
4686 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4687 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4688 // can do that via error message without getting a connection fail anyway...
4689 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4691 for htlc in self.context.pending_inbound_htlcs.iter() {
4692 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4693 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4696 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4698 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4699 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4702 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4703 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4704 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4707 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4710 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4711 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4712 // any further commitment updates after we set LocalShutdownSent.
4713 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4715 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4718 assert!(send_shutdown);
4719 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4720 Ok(scriptpubkey) => scriptpubkey,
4721 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4723 if !shutdown_scriptpubkey.is_compatible(their_features) {
4724 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4726 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4731 // From here on out, we may not fail!
4733 self.context.channel_state.set_remote_shutdown_sent();
4734 self.context.update_time_counter += 1;
4736 let monitor_update = if update_shutdown_script {
4737 self.context.latest_monitor_update_id += 1;
4738 let monitor_update = ChannelMonitorUpdate {
4739 update_id: self.context.latest_monitor_update_id,
4740 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4741 scriptpubkey: self.get_closing_scriptpubkey(),
4744 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4745 self.push_ret_blockable_mon_update(monitor_update)
4747 let shutdown = if send_shutdown {
4748 Some(msgs::Shutdown {
4749 channel_id: self.context.channel_id,
4750 scriptpubkey: self.get_closing_scriptpubkey(),
4754 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4755 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4756 // cell HTLCs and return them to fail the payment.
4757 self.context.holding_cell_update_fee = None;
4758 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4759 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4761 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4762 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4769 self.context.channel_state.set_local_shutdown_sent();
4770 self.context.update_time_counter += 1;
4772 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4775 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4776 let mut tx = closing_tx.trust().built_transaction().clone();
4778 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4780 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4781 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4782 let mut holder_sig = sig.serialize_der().to_vec();
4783 holder_sig.push(EcdsaSighashType::All as u8);
4784 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4785 cp_sig.push(EcdsaSighashType::All as u8);
4786 if funding_key[..] < counterparty_funding_key[..] {
4787 tx.input[0].witness.push(holder_sig);
4788 tx.input[0].witness.push(cp_sig);
4790 tx.input[0].witness.push(cp_sig);
4791 tx.input[0].witness.push(holder_sig);
4794 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
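// The signature ordering above mirrors the 2-of-2 funding redeemscript: per
// BOLT 3 the lexicographically lesser serialized funding pubkey comes first in
// the OP_CHECKMULTISIG, and CHECKMULTISIG consumes witness signatures in that
// same key order (after the leading empty element consumed by its off-by-one
// bug). A hedged byte-level sketch of the final witness stack:
//
//     // [ <> , <sig_for_lesser_key> , <sig_for_greater_key> , <redeemscript> ]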
pub fn closing_signed<F: Deref>(
	&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
	-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
	where F::Target: FeeEstimator
{
	if !self.context.channel_state.is_both_sides_shutdown() {
		return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
	}
	if self.context.channel_state.is_peer_disconnected() {
		return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
	}
	if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
		return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
	}
	if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
		return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
	}

	if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
		return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
	}

	if self.context.channel_state.is_monitor_update_in_progress() {
		self.context.pending_counterparty_closing_signed = Some(msg.clone());
		return Ok((None, None, None));
	}

	let funding_redeemscript = self.context.get_funding_redeemscript();
	let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
	if used_total_fee != msg.fee_satoshis {
		return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
	}
	let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

	match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
		Ok(_) => {},
		Err(_e) => {
			// The remote end may have decided to revoke their output due to inconsistent dust
			// limits, so check for that case by re-checking the signature here.
			closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
			let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
			secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
		},
	};

	for outp in closing_tx.trust().built_transaction().output.iter() {
		if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
		}
	}

	assert!(self.context.shutdown_scriptpubkey.is_some());
	if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
		if last_fee == msg.fee_satoshis {
			let shutdown_result = ShutdownResult {
				monitor_update: None,
				dropped_outbound_htlcs: Vec::new(),
				unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
				channel_id: self.context.channel_id,
				counterparty_node_id: self.context.counterparty_node_id,
			};
			let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
			self.context.channel_state = ChannelState::ShutdownComplete;
			self.context.update_time_counter += 1;
			return Ok((None, Some(tx), Some(shutdown_result)));
		}
	}

	let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

	macro_rules! propose_fee {
		($new_fee: expr) => {
			let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
				(closing_tx, $new_fee)
			} else {
				self.build_closing_transaction($new_fee, false)
			};

			return match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let sig = ecdsa
						.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
					let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
						let shutdown_result = ShutdownResult {
							monitor_update: None,
							dropped_outbound_htlcs: Vec::new(),
							unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
							channel_id: self.context.channel_id,
							counterparty_node_id: self.context.counterparty_node_id,
						};
						self.context.channel_state = ChannelState::ShutdownComplete;
						self.context.update_time_counter += 1;
						let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
						(Some(tx), Some(shutdown_result))
					} else {
						(None, None)
					};

					self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
					Ok((Some(msgs::ClosingSigned {
						channel_id: self.context.channel_id,
						fee_satoshis: used_fee,
						signature: sig,
						fee_range: Some(msgs::ClosingSignedFeeRange {
							min_fee_satoshis: our_min_fee,
							max_fee_satoshis: our_max_fee,
						}),
					}), signed_tx, shutdown_result))
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		}
	}

	if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
		if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
			return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
		}
		if max_fee_satoshis < our_min_fee {
			return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
		}
		if min_fee_satoshis > our_max_fee {
			return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
		}

		if !self.context.is_outbound() {
			// They have to pay, so pick the highest fee in the overlapping range.
			// We should never set an upper bound aside from their full balance
			debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
			propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
		} else {
			if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
					msg.fee_satoshis, our_min_fee, our_max_fee)));
			}
			// The proposed fee is in our acceptable range, accept it and broadcast!
			propose_fee!(msg.fee_satoshis);
		}
	} else {
		// Old fee style negotiation. We don't bother to enforce whether they are complying
		// with the "making progress" requirements, we just comply and hope for the best.
		if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
			if msg.fee_satoshis > last_fee {
				if msg.fee_satoshis < our_max_fee {
					propose_fee!(msg.fee_satoshis);
				} else if last_fee < our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
				}
			} else {
				if msg.fee_satoshis > our_min_fee {
					propose_fee!(msg.fee_satoshis);
				} else if last_fee > our_min_fee {
					propose_fee!(our_min_fee);
				} else {
					return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
				}
			}
		} else {
			if msg.fee_satoshis < our_min_fee {
				propose_fee!(our_min_fee);
			} else if msg.fee_satoshis > our_max_fee {
				propose_fee!(our_max_fee);
			} else {
				propose_fee!(msg.fee_satoshis);
			}
		}
	}
}

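// Worked example (illustrative, not from the original source): suppose
// calculate_closing_fee_limits returns (our_min_fee, our_max_fee) = (1_000, 5_000) sats.
//
//  * Peer sends fee_satoshis = 4_000 with fee_range (500, 10_000): the fee is inside both
//    ranges, so as non-funder we'd counter with min(10_000, 5_000) = 5_000, while as
//    funder we'd accept 4_000 outright via propose_fee!(msg.fee_satoshis).
//  * Peer sends fee_range (100, 800): their max (800) is below our_min_fee (1_000), so we
//    return ChannelError::Warn and negotiation stalls without force-closing.
//  * Legacy peer (no fee_range) offers 6_000 after we last sent 5_000: 6_000 exceeds
//    our_max_fee and our last fee already equals it, so we give up with ChannelError::Close.
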
fn internal_htlc_satisfies_config(
	&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
) -> Result<(), (&'static str, u16)> {
	let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
		.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
	if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
		(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
		return Err((
			"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
			0x1000 | 12, // fee_insufficient
		));
	}
	if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
		return Err((
			"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
			0x1000 | 13, // incorrect_cltv_expiry
		));
	}
	Ok(())
}

/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
/// unsuccessful, falls back to the previous one if one exists.
pub fn htlc_satisfies_config(
	&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
) -> Result<(), (&'static str, u16)> {
	self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
		.or_else(|err| {
			if let Some(prev_config) = self.context.prev_config() {
				self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
			} else {
				Err(err)
			}
		})
}

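// Worked example (illustrative): with forwarding_fee_base_msat = 1_000 and
// forwarding_fee_proportional_millionths = 100, forwarding amt_to_forward = 1_000_000 msat
// requires fee = 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat, so the inbound HTLC
// must carry at least 1_001_100 msat or we fail back with fee_insufficient (0x1000 | 12).
// Similarly, with cltv_expiry_delta = 40 the inbound cltv_expiry must be at least
// outgoing_cltv_value + 40 or we fail back with incorrect_cltv_expiry (0x1000 | 13).
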
pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
	self.context.cur_holder_commitment_transaction_number + 1
}

pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
	self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
}

pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
	self.context.cur_counterparty_commitment_transaction_number + 2
}

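// Illustrative note (added): commitment numbers are tracked counting *down* from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1) while the protocol-level numbers count up from 0,
// hence the +1/+2 offsets above. Right after funding_signed both stored indices sit at
// INITIAL_COMMITMENT_NUMBER - 1, and each revoke_and_ack moves the relevant index down by
// one as the corresponding revocation secret is released.
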
pub fn get_signer(&self) -> &ChannelSignerType<SP> {
	&self.context.holder_signer
}

pub fn get_value_stat(&self) -> ChannelValueStat {
	ChannelValueStat {
		value_to_self_msat: self.context.value_to_self_msat,
		channel_value_msat: self.context.channel_value_satoshis * 1000,
		channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
		pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
		pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
		holding_cell_outbound_amount_msat: {
			let mut res = 0;
			for h in self.context.holding_cell_htlc_updates.iter() {
				match h {
					&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
						res += amount_msat;
					},
					_ => {}
				}
			}
			res
		},
		counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
		counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
	}
}

/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
/// Allowed in any state (including after shutdown)
pub fn is_awaiting_monitor_update(&self) -> bool {
	self.context.channel_state.is_monitor_update_in_progress()
}

/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
	if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
	self.context.blocked_monitor_updates[0].update.update_id - 1
}

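// Worked example (illustrative): with latest_monitor_update_id = 9 and updates 8 and 9
// still queued in blocked_monitor_updates, get_latest_unblocked_monitor_update_id()
// returns 7 (one below the first blocked update); with nothing blocked it simply returns
// the latest update ID, 9.
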
/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
/// further blocked monitor update exists after the next.
pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
	if self.context.blocked_monitor_updates.is_empty() { return None; }
	Some((self.context.blocked_monitor_updates.remove(0).update,
		!self.context.blocked_monitor_updates.is_empty()))
}

/// Pushes a new monitor update into our monitor update queue, returning it if it should be
/// immediately given to the user for persisting or `None` if it should be held as blocked.
fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
-> Option<ChannelMonitorUpdate> {
	let release_monitor = self.context.blocked_monitor_updates.is_empty();
	if !release_monitor {
		self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
			update,
		});
		None
	} else {
		Some(update)
	}
}

pub fn blocked_monitor_updates_pending(&self) -> usize {
	self.context.blocked_monitor_updates.len()
}

/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
/// If the channel is outbound, this implies we have not yet broadcasted the funding
/// transaction. If the channel is inbound, this implies simply that the channel has not
/// advanced state.
pub fn is_awaiting_initial_mon_persist(&self) -> bool {
	if !self.is_awaiting_monitor_update() { return false; }
	if matches!(
		self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
		if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
	) {
		// If we're not a 0conf channel, we'll be waiting on a monitor update with only
		// AwaitingChannelReady set, though our peer could have sent their channel_ready.
		debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
		return true;
	}
	if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
		self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
		// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
		// waiting for the initial monitor persistence. Thus, we check if our commitment
		// transaction numbers have both been iterated only exactly once (for the
		// funding_signed), and we're awaiting monitor update.
		//
		// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
		// only way to get an awaiting-monitor-update state during initial funding is if the
		// initial monitor persistence is still pending).
		//
		// Because deciding we're awaiting initial broadcast spuriously could result in
		// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
		// we hard-assert here, even in production builds.
		if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
		assert!(self.context.monitor_pending_channel_ready);
		assert_eq!(self.context.latest_monitor_update_id, 0);
		return true;
	}
	false
}

/// Returns true if our channel_ready has been sent
pub fn is_our_channel_ready(&self) -> bool {
	matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
		matches!(self.context.channel_state, ChannelState::ChannelReady(_))
}

/// Returns true if our peer has either initiated or agreed to shut down the channel.
pub fn received_shutdown(&self) -> bool {
	self.context.channel_state.is_remote_shutdown_sent()
}

/// Returns true if we either initiated or agreed to shut down the channel.
pub fn sent_shutdown(&self) -> bool {
	self.context.channel_state.is_local_shutdown_sent()
}

/// Returns true if this channel is fully shut down. True here implies that no further actions
/// may/will be taken on this channel, and thus this object should be freed. Any future changes
/// will be handled appropriately by the chain monitor.
pub fn is_shutdown(&self) -> bool {
	matches!(self.context.channel_state, ChannelState::ShutdownComplete)
}

pub fn channel_update_status(&self) -> ChannelUpdateStatus {
	self.context.channel_update_status
}

pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
	self.context.update_time_counter += 1;
	self.context.channel_update_status = status;
}

fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
	// Called:
	//  * always when a new block/transactions are confirmed with the new height
	//  * when funding is signed with a height of 0
	if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
		return None;
	}

	let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
	if funding_tx_confirmations <= 0 {
		self.context.funding_tx_confirmation_height = 0;
	}

	if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
		return None;
	}

	// If we're still pending the signature on a funding transaction, then we're not ready to send a
	// channel_ready yet.
	if self.context.signer_pending_funding {
		return None;
	}

	// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
	// channel_ready until the entire batch is ready.
	let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
		self.context.channel_state.set_our_channel_ready();
		true
	} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
		self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
		self.context.update_time_counter += 1;
		true
	} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
		// We got a reorg but not enough to trigger a force close, just ignore.
		false
	} else {
		if self.context.funding_tx_confirmation_height != 0 &&
			self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
		{
			// We should never see a funding transaction on-chain until we've received
			// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
			// an inbound channel - before that we have no known funding TXID). The fuzzer,
			// however, may do this and we shouldn't treat it as a bug.
			#[cfg(not(fuzzing))]
			panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
				Do NOT broadcast a funding transaction manually - let LDK do it for you!",
				self.context.channel_state.to_u32());
		}
		// We got a reorg but not enough to trigger a force close, just ignore.
		false
	};

	if need_commitment_update {
		if !self.context.channel_state.is_monitor_update_in_progress() {
			if !self.context.channel_state.is_peer_disconnected() {
				let next_per_commitment_point =
					self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
				return Some(msgs::ChannelReady {
					channel_id: self.context.channel_id,
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				});
			}
		} else {
			self.context.monitor_pending_channel_ready = true;
		}
	}
	None
}

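// Worked example (illustrative): if the funding transaction confirmed at height 800_000
// and the current tip is 800_002, funding_tx_confirmations = 800_002 - 800_000 + 1 = 3,
// so a channel with minimum_depth = 3 becomes ready exactly at that tip. A reorg that
// drops the tip below the confirmation height drives the count to <= 0 and resets
// funding_tx_confirmation_height to 0 above.
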
/// When a transaction is confirmed, we check whether it is or spends the funding transaction.
/// In the first case, we store the confirmation height and calculate the short channel id.
/// In the second, we simply return an Err indicating we need to be force-closed now.
pub fn transactions_confirmed<NS: Deref, L: Deref>(
	&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
	chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
where
	NS::Target: NodeSigner,
	L::Target: Logger
{
	let mut msgs = (None, None);
	if let Some(funding_txo) = self.context.get_funding_txo() {
		for &(index_in_block, tx) in txdata.iter() {
			// Check if the transaction is the expected funding transaction, and if it is,
			// check that it pays the right amount to the right script.
			if self.context.funding_tx_confirmation_height == 0 {
				if tx.txid() == funding_txo.txid {
					let txo_idx = funding_txo.index as usize;
					if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
							tx.output[txo_idx].value != self.context.channel_value_satoshis {
						if self.context.is_outbound() {
							// If we generated the funding transaction and it doesn't match what it
							// should, the client is really broken and we should just panic and
							// tell them off. That said, because hash collisions happen with high
							// probability in fuzzing mode, if we're fuzzing we just close the
							// channel and move on.
							#[cfg(not(fuzzing))]
							panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
						}
						self.context.update_time_counter += 1;
						let err_reason = "funding tx had wrong script/value or output index";
						return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
					} else {
						if self.context.is_outbound() {
							if !tx.is_coin_base() {
								for input in tx.input.iter() {
									if input.witness.is_empty() {
										// We generated a malleable funding transaction, implying we've
										// just exposed ourselves to funds loss to our counterparty.
										#[cfg(not(fuzzing))]
										panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
									}
								}
							}
						}
						self.context.funding_tx_confirmation_height = height;
						self.context.funding_tx_confirmed_in = Some(*block_hash);
						self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
							Ok(scid) => Some(scid),
							Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
						}
					}
					// If this is a coinbase transaction and not a 0-conf channel
					// we should update our min_depth to 100 to handle coinbase maturity
					if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
						self.context.minimum_depth = Some(COINBASE_MATURITY);
					}
				}
				// If we allow 1-conf funding, we may need to check for channel_ready here and
				// send it immediately instead of waiting for a best_block_updated call (which
				// may have already happened for this block).
				if let Some(channel_ready) = self.check_get_channel_ready(height) {
					log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
					let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
					msgs = (Some(channel_ready), announcement_sigs);
				}
			}
			for inp in tx.input.iter() {
				if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
					log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
					return Err(ClosureReason::CommitmentTxConfirmed);
				}
			}
		}
	}
	Ok(msgs)
}

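// Worked example (illustrative): scid_from_parts packs the funding output's on-chain
// location into a u64 as (block_height << 40) | (tx_index << 16) | vout, so a funding
// output confirmed at height 800_000 as the block's 42nd transaction with vout 1 gets the
// SCID conventionally rendered as 800000x42x1. The Err arm above can only fire for
// out-of-range parts (height or tx_index >= 2^24, or vout > 0xffff).
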
/// When a new block is connected, we check the height of the block against outbound holding
/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
/// handled by the ChannelMonitor.
///
/// If we return Err, the channel may have been closed, at which point the standard
/// requirements apply - no calls may be made except those explicitly stated to be allowed
/// post-shutdown.
///
/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
/// back.
pub fn best_block_updated<NS: Deref, L: Deref>(
	&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
	node_signer: &NS, user_config: &UserConfig, logger: &L
) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
where
	NS::Target: NodeSigner,
	L::Target: Logger
{
	self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
}

fn do_best_block_updated<NS: Deref, L: Deref>(
	&mut self, height: u32, highest_header_time: u32,
	chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
where
	NS::Target: NodeSigner,
	L::Target: Logger
{
	let mut timed_out_htlcs = Vec::new();
	// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
	// forward an HTLC when our counterparty should almost certainly just fail it for expiring
	// ~now.
	let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
	self.context.holding_cell_htlc_updates.retain(|htlc_update| {
		match htlc_update {
			&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
				if *cltv_expiry <= unforwarded_htlc_cltv_limit {
					timed_out_htlcs.push((source.clone(), payment_hash.clone()));
					false
				} else { true }
			},
			_ => true
		}
	});

	self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

	if let Some(channel_ready) = self.check_get_channel_ready(height) {
		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
		return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
	}

	if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
		self.context.channel_state.is_our_channel_ready() {
		let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if self.context.funding_tx_confirmation_height == 0 {
			// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
			// zero if it has been reorged out, however in either case, our state flags
			// indicate we've already sent a channel_ready
			funding_tx_confirmations = 0;
		}

		// If we've sent channel_ready (or have both sent and received channel_ready), and
		// the funding transaction has become unconfirmed,
		// close the channel and hope we can get the latest state on chain (because presumably
		// the funding transaction is at least still in the mempool of most nodes).
		//
		// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
		// 0-conf channel, but not doing so may lead to the
		// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
		// to.
		if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
			let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
				self.context.minimum_depth.unwrap(), funding_tx_confirmations);
			return Err(ClosureReason::ProcessingError { err: err_reason });
		}
	} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
			height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
		log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
		// If funding_tx_confirmed_in is unset, the channel must not be active
		assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
		assert!(!self.context.channel_state.is_our_channel_ready());
		return Err(ClosureReason::FundingTimedOut);
	}

	let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
		self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
	} else { None };
	Ok((None, timed_out_htlcs, announcement_sigs))
}

/// Indicates the funding transaction is no longer confirmed in the main chain. This may
/// force-close the channel, but may also indicate a harmless reorganization of a block or two
/// before the channel has reached channel_ready and we can just wait for more blocks.
pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
	if self.context.funding_tx_confirmation_height != 0 {
		// We handle the funding disconnection by calling best_block_updated with a height one
		// below where our funding was connected, implying a reorg back to conf_height - 1.
		let reorg_height = self.context.funding_tx_confirmation_height - 1;
		// We use the time field to bump the current time we set on channel updates if it's
		// larger. If we don't know that time has moved forward, we can just set it to the last
		// time we saw and it will be ignored.
		let best_time = self.context.update_time_counter;
		match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
			Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
				assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
				assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
				assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
				Ok(())
			},
			Err(e) => Err(e)
		}
	} else {
		// We never learned about the funding confirmation anyway, just ignore
		Ok(())
	}
}

// Methods to get unprompted messages to send to the remote end (or where we already returned
// something in the handler for the message that prompted this message):

/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
/// directions). Should be used for both broadcasted announcements and in response to an
/// AnnouncementSignatures message from the remote peer.
///
/// Will only fail if we're not in a state where channel_announcement may be sent (including
/// closing).
///
/// This will only return ChannelError::Ignore upon failure.
///
/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
fn get_channel_announcement<NS: Deref>(
	&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
	if !self.context.config.announced_channel {
		return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
	}
	if !self.context.is_usable() {
		return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
	}

	let short_channel_id = self.context.get_short_channel_id()
		.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
	let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
		.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
	let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
	let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

	let msg = msgs::UnsignedChannelAnnouncement {
		features: channelmanager::provided_channel_features(&user_config),
		chain_hash,
		short_channel_id,
		node_id_1: if were_node_one { node_id } else { counterparty_node_id },
		node_id_2: if were_node_one { counterparty_node_id } else { node_id },
		bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
		bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
		excess_data: Vec::new(),
	};

	Ok(msg)
}

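// Illustrative note (added): BOLT 7 requires node_id_1/bitcoin_key_1 to belong to the node
// whose serialized public key is lexicographically lesser, which is what the were_node_one
// comparison above enforces; both peers therefore construct byte-identical
// UnsignedChannelAnnouncement contents to sign.
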
fn get_announcement_sigs<NS: Deref, L: Deref>(
	&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	best_block_height: u32, logger: &L
) -> Option<msgs::AnnouncementSignatures>
where
	NS::Target: NodeSigner,
	L::Target: Logger
{
	if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
		return None;
	}

	if !self.context.is_usable() {
		return None;
	}

	if self.context.channel_state.is_peer_disconnected() {
		log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
		return None;
	}

	if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
		return None;
	}

	log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
	let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
		Ok(a) => a,
		Err(e) => {
			log_trace!(logger, "{:?}", e);
			return None;
		}
	};
	let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
		Err(_) => {
			log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
			return None;
		},
		Ok(v) => v
	};
	match &self.context.holder_signer {
		ChannelSignerType::Ecdsa(ecdsa) => {
			let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
				Err(_) => {
					log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
					return None;
				},
				Ok(v) => v
			};
			let short_channel_id = match self.context.get_short_channel_id() {
				Some(scid) => scid,
				None => return None,
			};

			self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

			Some(msgs::AnnouncementSignatures {
				channel_id: self.context.channel_id(),
				short_channel_id,
				node_signature: our_node_sig,
				bitcoin_signature: our_bitcoin_sig,
			})
		},
		// TODO (taproot|arik)
		#[cfg(taproot)]
		_ => todo!()
	}
}

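// Illustrative note (added): the `funding_tx_confirmation_height + 5 > best_block_height`
// check above implements BOLT 7's six-confirmation rule for announcements: with funding
// confirmed at height H, we only announce once the tip reaches H + 5, i.e. once the
// funding transaction has at least 6 confirmations.
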
/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
/// available.
fn sign_channel_announcement<NS: Deref>(
	&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
	if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
		let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
		let were_node_one = announcement.node_id_1 == our_node_key;

		let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
			.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
					.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
				Ok(msgs::ChannelAnnouncement {
					node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
					node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
					bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
					bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
					contents: announcement,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	} else {
		Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
	}
}

/// Processes an incoming announcement_signatures message, providing a fully-signed
/// channel_announcement message which we can broadcast and storing our counterparty's
/// signatures for later reconstruction/rebroadcast of the channel_announcement.
pub fn announcement_signatures<NS: Deref>(
	&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
	msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
	let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

	let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

	if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
		return Err(ChannelError::Close(format!(
			"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
			&announcement, self.context.get_counterparty_node_id())));
	}
	if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
		return Err(ChannelError::Close(format!(
			"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
			&announcement, self.context.counterparty_funding_pubkey())));
	}

	self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
	if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
		return Err(ChannelError::Ignore(
			"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
	}

	self.sign_channel_announcement(node_signer, announcement)
}

/// Gets a signed channel_announcement for this channel, if we previously received an
/// announcement_signatures from our counterparty.
pub fn get_signed_channel_announcement<NS: Deref>(
	&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
	if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
		return None;
	}
	let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
		Ok(res) => res,
		Err(_) => return None,
	};
	match self.sign_channel_announcement(node_signer, announcement) {
		Ok(res) => Some(res),
		Err(_) => None,
	}
}

/// May panic if called on a channel that wasn't immediately-previously
/// self.remove_uncommitted_htlcs_and_mark_paused()'d
pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
	assert!(self.context.channel_state.is_peer_disconnected());
	assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
	// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
	// current to_remote balances. However, it no longer has any use, and thus is now simply
	// set to a dummy (but valid, as required by the spec) public key.
	// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
	// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
	// valid, and valid in fuzzing mode's arbitrary validity criteria:
	let mut pk = [2; 33]; pk[1] = 0xff;
	let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
	let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
		let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
		log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
		remote_last_secret
	} else {
		log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
		[0; 32]
	};
	self.mark_awaiting_response();
	msgs::ChannelReestablish {
		channel_id: self.context.channel_id(),
		// The protocol has two different commitment number concepts - the "commitment
		// transaction number", which starts from 0 and counts up, and the "revocation key
		// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
		// commitment transaction numbers by the index which will be used to reveal the
		// revocation key for that commitment transaction, which means we have to convert them
		// to protocol-level commitment numbers here...

		// next_local_commitment_number is the next commitment_signed number we expect to
		// receive (indicating if they need to resend one that we missed).
		next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
		// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
		// receive, however we track it by the next commitment number for a remote transaction
		// (which is one further, as they always revoke previous commitment transaction, not
		// the one we send) so we have to decrement by 1. Note that if
		// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
		// dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
		// underflow here.
		next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
		your_last_per_commitment_secret: remote_last_secret,
		my_current_per_commitment_point: dummy_pubkey,
		// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
		// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
		// txid of that interactive transaction, else we MUST NOT set it.
		next_funding_txid: None,
	}
}

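// Worked example (illustrative): INITIAL_COMMITMENT_NUMBER is 2^48 - 1 and our indices
// count down, so immediately after the funding handshake (both indices at
// INITIAL_COMMITMENT_NUMBER - 1) we'd send next_local_commitment_number = 1 and
// next_remote_commitment_number = 0: we expect their commitment_signed number 1 next, and
// have not yet seen them revoke anything.
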
// Send stuff to our remote peers:

/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
/// commitment update.
///
/// `Err`s will only be [`ChannelError::Ignore`].
pub fn queue_add_htlc<F: Deref, L: Deref>(
	&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
	onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
	blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) -> Result<(), ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
	self
		.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
			skimmed_fee_msat, blinding_point, fee_estimator, logger)
		.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
		.map_err(|err| {
			if let ChannelError::Ignore(_) = err { /* fine */ }
			else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
			err
		})
}

/// Adds a pending outbound HTLC to this channel; note that you probably want
/// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
///
/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
/// the wire:
/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
///   awaiting ACK.
/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
///   we may not yet have sent the previous commitment update messages and will need to
///   regenerate them.
///
/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
/// on this [`Channel`] if `force_holding_cell` is false.
///
/// `Err`s will only be [`ChannelError::Ignore`].
fn send_htlc<F: Deref, L: Deref>(
	&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
	onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
	skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
	fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
	if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
		self.context.channel_state.is_local_shutdown_sent() ||
		self.context.channel_state.is_remote_shutdown_sent()
	{
		return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
	}
	let channel_total_msat = self.context.channel_value_satoshis * 1000;
	if amount_msat > channel_total_msat {
		return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
	}

	if amount_msat == 0 {
		return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
	}

	let available_balances = self.context.get_available_balances(fee_estimator);
	if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
		return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
			available_balances.next_outbound_htlc_minimum_msat)));
	}

	if amount_msat > available_balances.next_outbound_htlc_limit_msat {
		return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
			available_balances.next_outbound_htlc_limit_msat)));
	}

	if self.context.channel_state.is_peer_disconnected() {
		// Note that this should never really happen: being !is_live() on receipt of an
		// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
		// the user to send directly into a !is_live() channel. However, if we
		// disconnected during the time the previous hop was doing the commitment dance we may
		// end up getting here after the forwarding delay. In any case, returning an
		// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
		return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
	}

	let need_holding_cell = self.context.channel_state.should_force_holding_cell();
	log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
		payment_hash, amount_msat,
		if force_holding_cell { "into holding cell" }
		else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
		else { "to peer" });

	if need_holding_cell {
		force_holding_cell = true;
	}

	// Now update local state:
	if force_holding_cell {
		self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat,
			payment_hash,
			cltv_expiry,
			source,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		});
		return Ok(None);
	}

	self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
		htlc_id: self.context.next_holder_htlc_id,
		amount_msat,
		payment_hash: payment_hash.clone(),
		cltv_expiry,
		state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
		source,
		blinding_point,
		skimmed_fee_msat,
	});

	let res = msgs::UpdateAddHTLC {
		channel_id: self.context.channel_id,
		htlc_id: self.context.next_holder_htlc_id,
		amount_msat,
		payment_hash,
		cltv_expiry,
		onion_routing_packet,
		skimmed_fee_msat,
		blinding_point,
	};
	self.context.next_holder_htlc_id += 1;

	Ok(Some(res))
}

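// Usage sketch (illustrative and simplified; the real call sites live in ChannelManager):
//
//     // Queue the HTLC; a later maybe_free_holding_cell_htlcs() emits the actual update:
//     chan.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, source.clone(),
//         onion_packet.clone(), None, None, &fee_estimator, &logger)?;
//
//     // Or add it and build the new remote commitment (plus monitor update) in one go:
//     let monitor_update_opt = chan.send_htlc_and_commit(amt_msat, payment_hash,
//         cltv_expiry, source, onion_packet, None, &fee_estimator, &logger)?;
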
fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
	log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
	// We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
	// fail to generate this, we still are at least at a position where upgrading their status
	// is acceptable.
	for htlc in self.context.pending_inbound_htlcs.iter_mut() {
		let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
			Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
		} else { None };
		if let Some(state) = new_state {
			log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
			htlc.state = state;
		}
	}
	for htlc in self.context.pending_outbound_htlcs.iter_mut() {
		if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
			log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
			// Grab the preimage, if it exists, instead of cloning
			let mut reason = OutboundHTLCOutcome::Success(None);
			mem::swap(outcome, &mut reason);
			htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
		}
	}
	if let Some((feerate, update_state)) = self.context.pending_update_fee {
		if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
			debug_assert!(!self.context.is_outbound());
			log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
			self.context.feerate_per_kw = feerate;
			self.context.pending_update_fee = None;
		}
	}
	self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

	let (mut htlcs_ref, counterparty_commitment_tx) =
		self.build_commitment_no_state_update(logger);
	let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
	let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
		htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

	if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
		self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
	}

	self.context.latest_monitor_update_id += 1;
	let monitor_update = ChannelMonitorUpdate {
		update_id: self.context.latest_monitor_update_id,
		updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
			commitment_txid: counterparty_commitment_txid,
			htlc_outputs: htlcs.clone(),
			commitment_number: self.context.cur_counterparty_commitment_transaction_number,
			their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
			feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
			to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
			to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
		}],
	};
	self.context.channel_state.set_awaiting_remote_revoke();
	monitor_update
}

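// Illustrative note (added): the ChannelMonitorUpdate built above records the
// counterparty's *new* commitment transaction (txid, HTLCs, per-commitment point) before
// the corresponding commitment_signed is released to the wire, so a crash between monitor
// persistence and message send can never leave the monitor unaware of a state we signed.
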
fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
where L::Target: Logger
{
	let counterparty_keys = self.context.build_remote_transaction_keys();
	let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
	let counterparty_commitment_tx = commitment_stats.tx;

	#[cfg(any(test, fuzzing))]
	{
		if !self.context.is_outbound() {
			let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			if let Some(info) = projected_commit_tx_info {
				let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
				if info.total_pending_htlcs == total_pending_htlcs
					&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
					&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
					&& info.feerate == self.context.feerate_per_kw {
					let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
					assert_eq!(actual_fee, info.fee);
				}
			}
		}
	}

	(commitment_stats.htlcs_included, counterparty_commitment_tx)
}

/// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
/// generation when we shouldn't change HTLC/channel state.
fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
	// Get the fee tests from `build_commitment_no_state_update`
	#[cfg(any(test, fuzzing))]
	self.build_commitment_no_state_update(logger);

	let counterparty_keys = self.context.build_remote_transaction_keys();
	let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
	let counterparty_commitment_txid = commitment_stats.tx.trust().txid();

	match &self.context.holder_signer {
		ChannelSignerType::Ecdsa(ecdsa) => {
			let (signature, htlc_signatures);

			{
				let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
				for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
					htlcs.push(htlc);
				}

				let res = ecdsa.sign_counterparty_commitment(
						&commitment_stats.tx,
						commitment_stats.inbound_htlc_preimages,
						commitment_stats.outbound_htlc_preimages,
						&self.context.secp_ctx,
					).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
				signature = res.0;
				htlc_signatures = res.1;

				log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
					encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
					&counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
					log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());

				for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
					log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
						encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
						encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
						log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
						log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
				}
			}

			Ok((msgs::CommitmentSigned {
				channel_id: self.context.channel_id,
				signature,
				htlc_signatures,
				#[cfg(taproot)]
				partial_signature_with_nonce: None,
			}, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
		},
		// TODO (taproot|arik)
		#[cfg(taproot)]
		_ => todo!()
	}
}

/// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
/// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
///
/// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
/// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
pub fn send_htlc_and_commit<F: Deref, L: Deref>(
	&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
	source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
	fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
where F::Target: FeeEstimator, L::Target: Logger
{
	let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
		onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
	if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
	match send_res? {
		Some(_) => {
			let monitor_update = self.build_commitment_no_status_check(logger);
			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
			Ok(self.push_ret_blockable_mon_update(monitor_update))
		},
		None => Ok(None)
	}
}

/// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
/// happened.
pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
	let new_forwarding_info = Some(CounterpartyForwardingInfo {
		fee_base_msat: msg.contents.fee_base_msat,
		fee_proportional_millionths: msg.contents.fee_proportional_millionths,
		cltv_expiry_delta: msg.contents.cltv_expiry_delta
	});
	let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
	if did_change {
		self.context.counterparty_forwarding_info = new_forwarding_info;
	}

	Ok(did_change)
}

6005 /// Begins the shutdown process, getting a message for the remote peer and returning all
6006 /// holding cell HTLCs for payment failure.
	/// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case
	/// no [`ChannelMonitorUpdate`] will be returned.
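	///
	/// A rough usage sketch (assumed bindings; error handling elided):
	/// ```ignore
	/// let (shutdown_msg, monitor_update_opt, dropped_htlcs, shutdown_result_opt) =
	/// 	chan.get_shutdown(&signer_provider, &their_features, None, None)?;
	/// // Send `shutdown_msg` to the peer, persist the monitor update if one was returned, and
	/// // fail the `dropped_htlcs` backwards as they will never be committed.
	/// ```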
6010 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6011 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
		}
		else if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		// If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
		// script is set, we just force-close and call it a day.
		let mut chan_closed = false;
		if self.context.channel_state.is_pre_funded_state() {
			chan_closed = true;
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None if !chan_closed => {
				// use override shutdown script if provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
			None => false,
		};
6063 // From here on out, we may not fail!
6064 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		let shutdown_result = if self.context.channel_state.is_pre_funded_state() {
			let shutdown_result = ShutdownResult {
				monitor_update: None,
				dropped_outbound_htlcs: Vec::new(),
				unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
				channel_id: self.context.channel_id,
				counterparty_node_id: self.context.counterparty_node_id,
			};
			self.context.channel_state = ChannelState::ShutdownComplete;
			Some(shutdown_result)
		} else {
			self.context.channel_state.set_local_shutdown_sent();
			None
		};
		self.context.update_time_counter += 1;
		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};
		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}

/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}
6136 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6137 pub fn new<ES: Deref, F: Deref>(
6138 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6139 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6140 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6141 ) -> Result<OutboundV1Channel<SP>, APIError>
6142 where ES::Target: EntropySource,
	      F::Target: FeeEstimator
	{
		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
		}
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_type = Self::get_initial_channel_type(&config, their_features);
		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

		let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		} else {
			(ConfirmationTarget::NonAnchorChannelFee, 0)
		};
		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}
		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
		};

		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));

		Ok(Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6223 channel_id: temporary_channel_id,
6224 temporary_channel_id: Some(temporary_channel_id),
6225 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat,
6240 pending_inbound_htlcs: Vec::new(),
6241 pending_outbound_htlcs: Vec::new(),
6242 holding_cell_htlc_updates: Vec::new(),
6243 pending_update_fee: None,
6244 holding_cell_update_fee: None,
6245 next_holder_htlc_id: 0,
6246 next_counterparty_htlc_id: 0,
6247 update_time_counter: 1,
6249 resend_order: RAACommitmentOrder::CommitmentFirst,
6251 monitor_pending_channel_ready: false,
6252 monitor_pending_revoke_and_ack: false,
6253 monitor_pending_commitment_signed: false,
6254 monitor_pending_forwards: Vec::new(),
6255 monitor_pending_failures: Vec::new(),
6256 monitor_pending_finalized_fulfills: Vec::new(),
6258 signer_pending_commitment_update: false,
6259 signer_pending_funding: false,
6261 #[cfg(debug_assertions)]
6262 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6263 #[cfg(debug_assertions)]
6264 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6266 last_sent_closing_fee: None,
6267 pending_counterparty_closing_signed: None,
6268 expecting_peer_commitment_signed: false,
6269 closing_fee_limits: None,
6270 target_closing_feerate_sats_per_kw: None,
6272 funding_tx_confirmed_in: None,
6273 funding_tx_confirmation_height: 0,
6274 short_channel_id: None,
6275 channel_creation_height: current_chain_height,
6277 feerate_per_kw: commitment_feerate,
6278 counterparty_dust_limit_satoshis: 0,
6279 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6280 counterparty_max_htlc_value_in_flight_msat: 0,
6281 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6282 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6283 holder_selected_channel_reserve_satoshis,
6284 counterparty_htlc_minimum_msat: 0,
6285 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6286 counterparty_max_accepted_htlcs: 0,
6287 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6288 minimum_depth: None, // Filled in in accept_channel
6290 counterparty_forwarding_info: None,
6292 channel_transaction_parameters: ChannelTransactionParameters {
6293 holder_pubkeys: pubkeys,
6294 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6295 is_outbound_from_holder: true,
6296 counterparty_parameters: None,
6297 funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,
6303 counterparty_cur_commitment_point: None,
6304 counterparty_prev_commitment_point: None,
6305 counterparty_node_id,
6307 counterparty_shutdown_scriptpubkey: None,
6309 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6311 channel_update_status: ChannelUpdateStatus::Enabled,
6312 closing_signed_in_flight: false,
6314 announcement_sigs: None,
6316 #[cfg(any(test, fuzzing))]
6317 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6318 #[cfg(any(test, fuzzing))]
6319 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6321 workaround_lnd_bug_4006: None,
6322 sent_message_awaiting_response: None,
6324 latest_inbound_scid_alias: None,
6325 outbound_scid_alias,
6327 channel_pending_event_emitted: false,
6328 channel_ready_event_emitted: false,
6330 #[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
6342 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6343 /// a funding_created message for the remote peer.
6344 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6345 /// or if called on an inbound channel.
6346 /// Note that channel_id changes during this call!
6347 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6348 /// If an Err is returned, it is a ChannelError::Close.
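	///
	/// A sketch of the expected call pattern (assumed bindings, not a compiling doctest):
	/// ```ignore
	/// if let Some(funding_created_msg) = chan.get_funding_created(funding_tx.clone(), funding_txo, false, &logger)
	/// 	.map_err(|(_chan, e)| e)?
	/// {
	/// 	// Send `funding_created_msg` to the peer; only broadcast `funding_tx` after their
	/// 	// `funding_signed` has been handled successfully.
	/// }
	/// ```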
6349 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6350 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}
6366 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6367 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6369 // Now that we're past error-generating stuff, update our local state:
6371 self.context.channel_state = ChannelState::FundingNegotiated;
6372 self.context.channel_id = funding_txo.to_channel_id();
6374 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6375 // We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
				self.context.minimum_depth.unwrap_or(0) > 0 &&
				self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let funding_created = self.context.get_funding_created_msg(logger);
		if funding_created.is_none() {
			if !self.context.signer_pending_funding {
				log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
				self.context.signer_pending_funding = true;
			}
		}

		Ok(funding_created)
	}
6396 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6397 // The default channel type (ie the first one we try) depends on whether the channel is
6398 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6399 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6400 // with no other changes, and fall back to `only_static_remotekey`.
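		// For example (a sketch of the possible outcomes, given the checks below): a private
		// channel where `negotiate_scid_privacy` is set and the peer advertises support ends up
		// with `static_remote_key | scid_privacy`, optionally plus `anchors_zero_fee_htlc_tx` if
		// that was negotiated too; a public channel starts from plain `static_remote_key`.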
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel &&
			config.channel_handshake_config.negotiate_scid_privacy &&
			their_features.supports_scid_privacy() {
			ret.set_scid_privacy_required();
		}

		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
		// set it now. If they don't understand it, we'll fall back to our default of
		// `only_static_remotekey`.
		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
			their_features.supports_anchors_zero_fee_htlc_tx() {
			ret.set_anchors_zero_fee_htlc_tx_required();
		}

		ret
	}
6419 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6420 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6421 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
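	///
	/// A retry sketch (assumed `chan`, `chain_hash` and `fee_estimator` bindings):
	/// ```ignore
	/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
	/// 	// Try again with a less featureful channel type.
	/// 	Ok(open_channel_msg) => { /* re-send `open_channel_msg` to the peer */ },
	/// 	// All channel type options are exhausted; fail the channel.
	/// 	Err(()) => {},
	/// }
	/// ```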
6422 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6423 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() ||
			!matches!(
				self.context.channel_state, ChannelState::NegotiatingFunding(flags)
				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
			)
		{
			return Err(());
		}
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted the channel.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature
		// for no reason.
6449 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6450 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6451 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6452 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
		if !self.context.is_outbound() {
			panic!("Tried to open a channel for an inbound channel?");
		}
		if self.context.have_received_message() {
			panic!("Cannot generate an open_channel after we've moved forward");
		}

		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an open_channel for a channel that has already advanced");
		}

		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::OpenChannel {
			chain_hash,
			temporary_channel_id: self.context.channel_id,
6480 funding_satoshis: self.context.channel_value_satoshis,
6481 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6482 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6483 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6484 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6485 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6486 feerate_per_kw: self.context.feerate_per_kw as u32,
6487 to_self_delay: self.context.get_holder_selected_contest_delay(),
6488 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6489 funding_pubkey: keys.funding_pubkey,
6490 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6491 payment_point: keys.payment_point,
6492 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6493 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6494 first_per_commitment_point,
6495 channel_flags: if self.context.config.announced_channel {1} else {0},
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
		}
	}
6505 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6506 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6508 // Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}
		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };
6601 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6602 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6603 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6604 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6605 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}
6613 let counterparty_pubkeys = ChannelPublicKeys {
6614 funding_pubkey: msg.funding_pubkey,
6615 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6616 payment_point: msg.payment_point,
6617 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});
6626 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6627 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
		self.context.channel_state = ChannelState::NegotiatingFunding(
			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
		);
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
6637 /// Handles a funding_signed message from the remote end.
6638 /// If this call is successful, broadcast the funding transaction (and not before!)
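	///
	/// A sketch of the expected flow (assumed bindings; not a compiling doctest):
	/// ```ignore
	/// match chan.funding_signed(&funding_signed_msg, best_block, &signer_provider, &logger) {
	/// 	Ok((channel, channel_monitor)) => {
	/// 		// Persist `channel_monitor` first, then broadcast the funding transaction.
	/// 	},
	/// 	Err((channel, e)) => { /* the channel is handed back so it can be closed cleanly */ },
	/// }
	/// ```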
6639 pub fn funding_signed<L: Deref>(
6640 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
	where
		L::Target: Logger
	{
		if !self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
		}
		if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
			return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}
6657 let funding_script = self.context.get_funding_redeemscript();
6659 let counterparty_keys = self.context.build_remote_transaction_keys();
6660 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6661 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6662 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6664 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6665 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6667 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6668 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6670 let trusted_tx = initial_commitment_tx.trust();
6671 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6672 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
		// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
		if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		let validated =
			self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
		if validated.is_err() {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}
6694 let funding_txo = self.context.get_funding_txo().unwrap();
6695 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6696 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6697 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6698 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6699 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
6707 channel_monitor.provide_initial_counterparty_commitment_tx(
6708 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6709 self.context.cur_counterparty_commitment_transaction_number,
6710 self.context.counterparty_cur_commitment_point.unwrap(),
6711 counterparty_initial_commitment_tx.feerate_per_kw(),
6712 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6713 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
		assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet that could fail an update!
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
		} else {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		}
6721 self.context.cur_holder_commitment_transaction_number -= 1;
6722 self.context.cur_counterparty_commitment_transaction_number -= 1;
6724 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6726 let mut channel = Channel { context: self.context };
		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok((channel, channel_monitor))
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
6736 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		if self.context.signer_pending_funding && self.context.is_outbound() {
			log_trace!(logger, "Signer unblocked a funding_created");
			self.context.get_funding_created_msg(logger)
		} else { None }
	}
}

/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}

impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6751 /// Creates a new channel from a remote sides' request for one.
6752 /// Assumes chain_hash has already been checked and corresponds with what we expect!
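	///
	/// A construction sketch (assumed bindings, mirroring the parameters below):
	/// ```ignore
	/// let mut chan = InboundV1Channel::new(
	/// 	&fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
	/// 	&our_supported_features, &their_features, &open_channel_msg, user_id, &config,
	/// 	best_block_height, &logger, /*is_0conf=*/ false,
	/// )?;
	/// let accept_channel_msg = chan.accept_inbound_channel();
	/// ```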
6753 pub fn new<ES: Deref, F: Deref, L: Deref>(
6754 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6755 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6756 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6757 current_chain_height: u32, logger: &L, is_0conf: bool,
6758 ) -> Result<InboundV1Channel<SP>, ChannelError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	{
		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
		let announced_channel = (msg.channel_flags & 1) == 1;
6766 // First check the channel type is known, failing before we do anything else if we don't
6767 // support this channel type.
		let channel_type = if let Some(channel_type) = &msg.channel_type {
			if channel_type.supports_any_optional_bits() {
				return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
			}

			// We only support the channel types defined by the `ChannelManager` in
			// `provided_channel_type_features`. The channel type must always support
			// `static_remote_key`.
			if !channel_type.requires_static_remote_key() {
				return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
			}
			// Make sure we support all of the features behind the channel type.
			if !channel_type.is_subset(our_supported_features) {
				return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
			}
			if channel_type.requires_scid_privacy() && announced_channel {
				return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
			}
			channel_type.clone()
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			channel_type
		};
6795 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6796 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6797 let pubkeys = holder_signer.pubkeys().clone();
6798 let counterparty_pubkeys = ChannelPublicKeys {
6799 funding_pubkey: msg.funding_pubkey,
6800 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6801 payment_point: msg.payment_point,
6802 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}
6810 // Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}
6843 // Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
6866 // Convert things into internal flags and prep our state:
		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}
6891 // check if the funder's amount for the initial commitment tx is sufficient
6892 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
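		// As a rough worked example (assuming the non-anchor commitment base weight of 724 and
		// 172 weight units per HTLC used by `commit_tx_fee_msat`): at 2,500 sat/kW with
		// MIN_AFFORDABLE_HTLC_COUNT = 4 HTLCs, the funder must afford roughly
		// 2_500 * (724 + 4 * 172) / 1_000 = 3_530 sats in commitment fees, on top of any
		// anchor output value.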
		let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
		} else {
			0
		};

		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
		if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
		}

		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}
		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };
		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
		};

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let minimum_depth = if is_0conf {
			Some(0)
		} else {
			Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
		};

		let chan = Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: None,
6972 temporary_channel_id: Some(msg.temporary_channel_id),
6973 channel_id: msg.temporary_channel_id,
				channel_state: ChannelState::NegotiatingFunding(
					NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
				),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6988 value_to_self_msat: msg.push_msat,
6990 pending_inbound_htlcs: Vec::new(),
6991 pending_outbound_htlcs: Vec::new(),
6992 holding_cell_htlc_updates: Vec::new(),
6993 pending_update_fee: None,
6994 holding_cell_update_fee: None,
6995 next_holder_htlc_id: 0,
6996 next_counterparty_htlc_id: 0,
6997 update_time_counter: 1,
6999 resend_order: RAACommitmentOrder::CommitmentFirst,
7001 monitor_pending_channel_ready: false,
7002 monitor_pending_revoke_and_ack: false,
7003 monitor_pending_commitment_signed: false,
7004 monitor_pending_forwards: Vec::new(),
7005 monitor_pending_failures: Vec::new(),
7006 monitor_pending_finalized_fulfills: Vec::new(),
7008 signer_pending_commitment_update: false,
7009 signer_pending_funding: false,
7011 #[cfg(debug_assertions)]
7012 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7013 #[cfg(debug_assertions)]
7014 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7016 last_sent_closing_fee: None,
7017 pending_counterparty_closing_signed: None,
7018 expecting_peer_commitment_signed: false,
7019 closing_fee_limits: None,
7020 target_closing_feerate_sats_per_kw: None,
7022 funding_tx_confirmed_in: None,
7023 funding_tx_confirmation_height: 0,
7024 short_channel_id: None,
7025 channel_creation_height: current_chain_height,
7027 feerate_per_kw: msg.feerate_per_kw,
7028 channel_value_satoshis: msg.funding_satoshis,
7029 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7030 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7031 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7032 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7033 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7034 holder_selected_channel_reserve_satoshis,
7035 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7036 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7037 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth,

				counterparty_forwarding_info: None,
7043 channel_transaction_parameters: ChannelTransactionParameters {
7044 holder_pubkeys: pubkeys,
7045 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7046 is_outbound_from_holder: false,
7047 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7048 selected_contest_delay: msg.to_self_delay,
						pubkeys: counterparty_pubkeys,
					}),
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
7054 funding_transaction: None,
7055 is_batch_funding: None,
7057 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7058 counterparty_prev_commitment_point: None,
7059 counterparty_node_id,
7061 counterparty_shutdown_scriptpubkey,
7063 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7065 channel_update_status: ChannelUpdateStatus::Enabled,
7066 closing_signed_in_flight: false,
7068 announcement_sigs: None,
7070 #[cfg(any(test, fuzzing))]
7071 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7072 #[cfg(any(test, fuzzing))]
7073 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7075 workaround_lnd_bug_4006: None,
7076 sent_message_awaiting_response: None,
7078 latest_inbound_scid_alias: None,
7079 outbound_scid_alias: 0,
7081 channel_pending_event_emitted: false,
7082 channel_ready_event_emitted: false,
7084 #[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		};

		Ok(chan)
	}
7098 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7099 /// should be sent back to the counterparty node.
7101 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
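	///
	/// A usage sketch (assumed `chan` binding):
	/// ```ignore
	/// let accept_channel_msg = chan.accept_inbound_channel();
	/// // Send `accept_channel_msg` back to the counterparty and await its `funding_created`.
	/// ```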
	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
		if self.context.is_outbound() {
			panic!("Tried to send accept_channel for an outbound channel?");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to send accept_channel after channel had moved forward");
		}
		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an accept_channel for a channel that has already advanced");
		}

		self.generate_accept_channel_message()
	}
7119 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7120 /// inbound channel. If the intention is to accept an inbound channel, use
7121 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7123 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7124 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7125 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7126 let keys = self.context.get_holder_pubkeys();
7128 msgs::AcceptChannel {
7129 temporary_channel_id: self.context.channel_id,
7130 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7131 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7132 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7133 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7134 minimum_depth: self.context.minimum_depth.unwrap(),
7135 to_self_delay: self.context.get_holder_selected_contest_delay(),
7136 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7137 funding_pubkey: keys.funding_pubkey,
7138 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7139 payment_point: keys.payment_point,
7140 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7141 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7142 first_per_commitment_point,
7143 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7144 Some(script) => script.clone().into_inner(),
7145 None => Builder::new().into_script(),
7147 channel_type: Some(self.context.channel_type.clone()),
7149 next_local_nonce: None,
7153 /// Allows tests to extract a [`msgs::AcceptChannel`] message for an inbound
7154 /// channel without accepting it.
7156 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7158 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7159 self.generate_accept_channel_message()
7162 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7163 let funding_script = self.context.get_funding_redeemscript();
7165 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7166 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7167 let trusted_tx = initial_commitment_tx.trust();
7168 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7169 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7170 // They sign the holder commitment transaction...
7171 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7172 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7173 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7174 encode::serialize_hex(&funding_script), &self.context.channel_id());
7175 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7177 Ok(initial_commitment_tx)
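// The check above reduces to a plain ECDSA verification over the commitment
// transaction sighash. A standalone sketch with made-up inputs (API names per
// the re-exported `secp256k1` crate; these are not the channel's real keys):
//
//     use bitcoin::secp256k1::{Message, PublicKey, Secp256k1, SecretKey};
//     let secp = Secp256k1::new();
//     let sk = SecretKey::from_slice(&[0x42; 32]).unwrap();
//     let pk = PublicKey::from_secret_key(&secp, &sk);
//     let sighash = Message::from_slice(&[0x01; 32]).unwrap(); // stands in for the funding sighash
//     let sig = secp.sign_ecdsa(&sighash, &sk);
//     assert!(secp.verify_ecdsa(&sighash, &sig, &pk).is_ok());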
7180 pub fn funding_created<L: Deref>(
7181 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7182 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7186 if self.context.is_outbound() {
7187 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7190 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7191 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7193 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7194 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7196 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7198 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7199 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7200 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7201 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7204 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7205 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7206 // This is an externally observable change before we finish all our checks. In particular
7207 // check_funding_created_signature may fail.
7208 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7210 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7212 Err(ChannelError::Close(e)) => {
7213 self.context.channel_transaction_parameters.funding_outpoint = None;
7214 return Err((self, ChannelError::Close(e)));
7217 // The only error we know how to handle is ChannelError::Close, so we fall over here
7218 // to make sure we don't continue with an inconsistent state.
7219 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7223 let holder_commitment_tx = HolderCommitmentTransaction::new(
7224 initial_commitment_tx,
7227 &self.context.get_holder_pubkeys().funding_pubkey,
7228 self.context.counterparty_funding_pubkey()
7231 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7232 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7235 // Now that we're past error-generating stuff, update our local state:
7237 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7238 self.context.channel_id = funding_txo.to_channel_id();
7239 self.context.cur_counterparty_commitment_transaction_number -= 1;
7240 self.context.cur_holder_commitment_transaction_number -= 1;
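// Commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER
// (2^48 - 1), so the decrements above record that both sides have moved past
// the initial commitment:
//
//     let initial: u64 = (1 << 48) - 1; // INITIAL_COMMITMENT_NUMBER
//     let after_funding = initial - 1;  // the next commitment to be signed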
7242 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7244 let funding_redeemscript = self.context.get_funding_redeemscript();
7245 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7246 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
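// Per BOLT 3, the obscure factor above is the lower 48 bits of
// SHA256(opener_payment_basepoint || accepter_payment_basepoint). Commitment
// number `n` is then embedded in each commitment transaction as
// `n XOR obscure_factor`, split across the locktime and input sequence
// fields, so on-chain observers cannot count how many updates occurred.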
7247 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7248 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7249 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7250 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7251 shutdown_script, self.context.get_holder_selected_contest_delay(),
7252 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7253 &self.context.channel_transaction_parameters,
7254 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7256 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7257 channel_monitor.provide_initial_counterparty_commitment_tx(
7258 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7259 self.context.cur_counterparty_commitment_transaction_number + 1,
7260 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7261 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7262 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7264 log_info!(logger, "{} funding_signed for peer for channel {}",
7265 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7267 // Promote the channel to a full-fledged one now that we have updated the state and have a
7268 // `ChannelMonitor`.
7269 let mut channel = Channel {
7270 context: self.context,
7272 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7273 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7275 Ok((channel, funding_signed, channel_monitor))
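// For reference, the V1 funding handshake this function participates in is
// (per BOLT 2):
//
//     opener                           accepter
//       |-------- open_channel -------->|
//       |<------- accept_channel -------|
//       |-------- funding_created ----->|   (handled above)
//       |<------- funding_signed -------|
//       |   ... funding tx confirms ...
//       |<------ channel_ready -------->|   (exchanged by both sides)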
7279 const SERIALIZATION_VERSION: u8 = 3;
7280 const MIN_SERIALIZATION_VERSION: u8 = 3;
7282 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7288 impl Writeable for ChannelUpdateStatus {
7289 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7290 // We only care about writing out the current state as it was announced, ie only either
7291 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7292 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7294 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7295 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7296 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7297 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
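// Taken together with the `Readable` impl below, serialization deliberately
// collapses the staged states to the state most recently announced:
//
//     DisabledStaged -> writes 0 -> reads back as Enabled
//     EnabledStaged  -> writes 1 -> reads back as Disabled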
7303 impl Readable for ChannelUpdateStatus {
7304 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7305 Ok(match <u8 as Readable>::read(reader)? {
7306 0 => ChannelUpdateStatus::Enabled,
7307 1 => ChannelUpdateStatus::Disabled,
7308 _ => return Err(DecodeError::InvalidValue),
7313 impl Writeable for AnnouncementSigsState {
7314 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7315 // We only care about writing out the current state as if we had just disconnected, at
7316 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7318 AnnouncementSigsState::NotSent => 0u8.write(writer),
7319 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7320 AnnouncementSigsState::Committed => 0u8.write(writer),
7321 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7326 impl Readable for AnnouncementSigsState {
7327 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7328 Ok(match <u8 as Readable>::read(reader)? {
7329 0 => AnnouncementSigsState::NotSent,
7330 1 => AnnouncementSigsState::PeerReceived,
7331 _ => return Err(DecodeError::InvalidValue),
7336 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7337 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7338 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7341 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7343 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7344 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7345 // the low bytes now and the optional high bytes later.
7346 let user_id_low = self.context.user_id as u64;
7347 user_id_low.write(writer)?;
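// Sketch of the split/recombine identity relied on here and in `read` below
// (illustrative value only):
//
//     let user_id: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
//     let low = user_id as u64;          // written here, readable by old versions
//     let high = (user_id >> 64) as u64; // written as an odd TLV below
//     assert_eq!(((high as u128) << 64) | (low as u128), user_id);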
7349 // Version 1 deserializers expected to read parts of the config object here. Version 2
7350 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7351 // `minimum_depth` we simply write dummy values here.
7352 writer.write_all(&[0; 8])?;
7354 self.context.channel_id.write(writer)?;
7356 let mut channel_state = self.context.channel_state;
7357 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7358 channel_state.set_peer_disconnected();
7360 channel_state.to_u32().write(writer)?;
7362 self.context.channel_value_satoshis.write(writer)?;
7364 self.context.latest_monitor_update_id.write(writer)?;
7366 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7367 // deserialized from that format.
7368 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7369 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7370 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7372 self.context.destination_script.write(writer)?;
7374 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7375 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7376 self.context.value_to_self_msat.write(writer)?;
7378 let mut dropped_inbound_htlcs = 0;
7379 for htlc in self.context.pending_inbound_htlcs.iter() {
7380 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7381 dropped_inbound_htlcs += 1;
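// (HTLCs still in RemoteAnnounced were never irrevocably committed on our
// side; the peer will re-announce them on reconnect, so they are
// deliberately not persisted.)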
7384 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7385 for htlc in self.context.pending_inbound_htlcs.iter() {
7386 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7389 htlc.htlc_id.write(writer)?;
7390 htlc.amount_msat.write(writer)?;
7391 htlc.cltv_expiry.write(writer)?;
7392 htlc.payment_hash.write(writer)?;
7394 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7395 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7397 htlc_state.write(writer)?;
7399 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7401 htlc_state.write(writer)?;
7403 &InboundHTLCState::Committed => {
7406 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7408 removal_reason.write(writer)?;
7413 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7414 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7415 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7417 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7418 for htlc in self.context.pending_outbound_htlcs.iter() {
7419 htlc.htlc_id.write(writer)?;
7420 htlc.amount_msat.write(writer)?;
7421 htlc.cltv_expiry.write(writer)?;
7422 htlc.payment_hash.write(writer)?;
7423 htlc.source.write(writer)?;
7425 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7427 onion_packet.write(writer)?;
7429 &OutboundHTLCState::Committed => {
7432 &OutboundHTLCState::RemoteRemoved(_) => {
7433 // Treat this as a Committed because we haven't received the CS - they'll
7434 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7437 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7439 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7440 preimages.push(preimage);
7442 let reason: Option<&HTLCFailReason> = outcome.into();
7443 reason.write(writer)?;
7445 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7447 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7448 preimages.push(preimage);
7450 let reason: Option<&HTLCFailReason> = outcome.into();
7451 reason.write(writer)?;
7454 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7455 pending_outbound_blinding_points.push(htlc.blinding_point);
7458 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7459 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7460 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7461 for update in self.context.holding_cell_htlc_updates.iter() {
7463 &HTLCUpdateAwaitingACK::AddHTLC {
7464 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7465 blinding_point, skimmed_fee_msat,
7468 amount_msat.write(writer)?;
7469 cltv_expiry.write(writer)?;
7470 payment_hash.write(writer)?;
7471 source.write(writer)?;
7472 onion_routing_packet.write(writer)?;
7474 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7475 holding_cell_blinding_points.push(blinding_point);
7477 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7479 payment_preimage.write(writer)?;
7480 htlc_id.write(writer)?;
7482 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7484 htlc_id.write(writer)?;
7485 err_packet.write(writer)?;
7490 match self.context.resend_order {
7491 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7492 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7495 self.context.monitor_pending_channel_ready.write(writer)?;
7496 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7497 self.context.monitor_pending_commitment_signed.write(writer)?;
7499 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7500 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7501 pending_forward.write(writer)?;
7502 htlc_id.write(writer)?;
7505 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7506 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7507 htlc_source.write(writer)?;
7508 payment_hash.write(writer)?;
7509 fail_reason.write(writer)?;
7512 if self.context.is_outbound() {
7513 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7514 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7515 Some(feerate).write(writer)?;
7517 // As for inbound HTLCs, if the update was only announced and never committed in a
7518 // commitment_signed, drop it.
7519 None::<u32>.write(writer)?;
7521 self.context.holding_cell_update_fee.write(writer)?;
7523 self.context.next_holder_htlc_id.write(writer)?;
7524 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7525 self.context.update_time_counter.write(writer)?;
7526 self.context.feerate_per_kw.write(writer)?;
7528 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7529 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7530 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7531 // consider the stale state on reload.
7534 self.context.funding_tx_confirmed_in.write(writer)?;
7535 self.context.funding_tx_confirmation_height.write(writer)?;
7536 self.context.short_channel_id.write(writer)?;
7538 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7539 self.context.holder_dust_limit_satoshis.write(writer)?;
7540 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7542 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7543 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7545 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7546 self.context.holder_htlc_minimum_msat.write(writer)?;
7547 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7549 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7550 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7552 match &self.context.counterparty_forwarding_info {
7555 info.fee_base_msat.write(writer)?;
7556 info.fee_proportional_millionths.write(writer)?;
7557 info.cltv_expiry_delta.write(writer)?;
7559 None => 0u8.write(writer)?
7562 self.context.channel_transaction_parameters.write(writer)?;
7563 self.context.funding_transaction.write(writer)?;
7565 self.context.counterparty_cur_commitment_point.write(writer)?;
7566 self.context.counterparty_prev_commitment_point.write(writer)?;
7567 self.context.counterparty_node_id.write(writer)?;
7569 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7571 self.context.commitment_secrets.write(writer)?;
7573 self.context.channel_update_status.write(writer)?;
7575 #[cfg(any(test, fuzzing))]
7576 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7577 #[cfg(any(test, fuzzing))]
7578 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7579 htlc.write(writer)?;
7582 // If the channel type is something other than only-static-remote-key, then we need to have
7583 // older clients fail to deserialize this channel at all. If the type is
7584 // only-static-remote-key, we simply consider it "default" and don't write the channel type here.
7586 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7587 Some(&self.context.channel_type) } else { None };
7589 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7590 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7591 // a different percentage of the channel value than 10%, which older versions of LDK used
7592 // to set it to before the percentage was made configurable.
7593 let serialized_holder_selected_reserve =
7594 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7595 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7597 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7598 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7599 let serialized_holder_htlc_max_in_flight =
7600 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7601 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
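// Both values above follow the same write-default-as-None pattern: if the
// field still equals the legacy default (which old readers recompute for
// themselves), nothing is written at all. Sketched with a hypothetical
// helper name:
//
//     let maybe_write = if value != legacy_default(channel_value_satoshis) {
//         Some(value) // written as a TLV below
//     } else {
//         None        // omitted; old readers fall back to the default
//     };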
7603 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7604 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7606 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7607 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7608 // we write the high bytes as an option here.
7609 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7611 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7613 write_tlv_fields!(writer, {
7614 (0, self.context.announcement_sigs, option),
7615 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7616 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7617 // them twice, once with their original default values above, and once as an option
7618 // here. On the read side, old versions will simply ignore the odd-type entries here,
7619 // and new versions map the default values to None and allow the TLV entries here to override them.
7621 (1, self.context.minimum_depth, option),
7622 (2, chan_type, option),
7623 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7624 (4, serialized_holder_selected_reserve, option),
7625 (5, self.context.config, required),
7626 (6, serialized_holder_htlc_max_in_flight, option),
7627 (7, self.context.shutdown_scriptpubkey, option),
7628 (8, self.context.blocked_monitor_updates, optional_vec),
7629 (9, self.context.target_closing_feerate_sats_per_kw, option),
7630 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7631 (13, self.context.channel_creation_height, required),
7632 (15, preimages, required_vec),
7633 (17, self.context.announcement_sigs_state, required),
7634 (19, self.context.latest_inbound_scid_alias, option),
7635 (21, self.context.outbound_scid_alias, required),
7636 (23, channel_ready_event_emitted, option),
7637 (25, user_id_high_opt, option),
7638 (27, self.context.channel_keys_id, required),
7639 (28, holder_max_accepted_htlcs, option),
7640 (29, self.context.temporary_channel_id, option),
7641 (31, channel_pending_event_emitted, option),
7642 (35, pending_outbound_skimmed_fees, optional_vec),
7643 (37, holding_cell_skimmed_fees, optional_vec),
7644 (38, self.context.is_batch_funding, option),
7645 (39, pending_outbound_blinding_points, optional_vec),
7646 (41, holding_cell_blinding_points, optional_vec),
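// Note the TLV type parity convention in the list above: odd-numbered
// entries may be safely ignored by readers that don't understand them, while
// even-numbered entries (e.g. 2, 28, 38) force old readers to fail
// deserialization rather than silently drop required state.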
7653 const MAX_ALLOC_SIZE: usize = 64*1024;
7654 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7656 ES::Target: EntropySource,
7657 SP::Target: SignerProvider
7659 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7660 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7661 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7663 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7664 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7665 // the low bytes now and the high bytes later.
7666 let user_id_low: u64 = Readable::read(reader)?;
7668 let mut config = Some(LegacyChannelConfig::default());
7670 // Read the old serialization of the ChannelConfig from version 0.0.98.
7671 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7672 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7673 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7674 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7676 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7677 let mut _val: u64 = Readable::read(reader)?;
7680 let channel_id = Readable::read(reader)?;
7681 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7682 let channel_value_satoshis = Readable::read(reader)?;
7684 let latest_monitor_update_id = Readable::read(reader)?;
7686 let mut keys_data = None;
7688 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7689 // the `channel_keys_id` TLV is present below.
7690 let keys_len: u32 = Readable::read(reader)?;
7691 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7692 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7693 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7694 let mut data = [0; 1024];
7695 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7696 reader.read_exact(read_slice)?;
7697 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
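// Standalone sketch of the bounded-read pattern used above: cap the initial
// allocation at MAX_ALLOC_SIZE and read in 1KB chunks, so a corrupted length
// prefix cannot trigger a huge up-front allocation (std-only, hypothetical
// helper name):
//
//     use std::io::Read;
//     fn read_len_prefixed<R: Read>(r: &mut R, len: usize) -> std::io::Result<Vec<u8>> {
//         let mut data = Vec::with_capacity(len.min(64 * 1024));
//         let mut buf = [0u8; 1024];
//         while data.len() != len {
//             let n = (len - data.len()).min(1024);
//             r.read_exact(&mut buf[..n])?;
//             data.extend_from_slice(&buf[..n]);
//         }
//         Ok(data)
//     }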
7701 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7702 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7703 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7706 let destination_script = Readable::read(reader)?;
7708 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7709 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7710 let value_to_self_msat = Readable::read(reader)?;
7712 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7714 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7715 for _ in 0..pending_inbound_htlc_count {
7716 pending_inbound_htlcs.push(InboundHTLCOutput {
7717 htlc_id: Readable::read(reader)?,
7718 amount_msat: Readable::read(reader)?,
7719 cltv_expiry: Readable::read(reader)?,
7720 payment_hash: Readable::read(reader)?,
7721 state: match <u8 as Readable>::read(reader)? {
7722 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7723 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7724 3 => InboundHTLCState::Committed,
7725 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7726 _ => return Err(DecodeError::InvalidValue),
7731 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7732 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7733 for _ in 0..pending_outbound_htlc_count {
7734 pending_outbound_htlcs.push(OutboundHTLCOutput {
7735 htlc_id: Readable::read(reader)?,
7736 amount_msat: Readable::read(reader)?,
7737 cltv_expiry: Readable::read(reader)?,
7738 payment_hash: Readable::read(reader)?,
7739 source: Readable::read(reader)?,
7740 state: match <u8 as Readable>::read(reader)? {
7741 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7742 1 => OutboundHTLCState::Committed,
7744 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7745 OutboundHTLCState::RemoteRemoved(option.into())
7748 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7749 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7752 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7753 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7755 _ => return Err(DecodeError::InvalidValue),
7757 skimmed_fee_msat: None,
7758 blinding_point: None,
7762 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7763 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7764 for _ in 0..holding_cell_htlc_update_count {
7765 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7766 0 => HTLCUpdateAwaitingACK::AddHTLC {
7767 amount_msat: Readable::read(reader)?,
7768 cltv_expiry: Readable::read(reader)?,
7769 payment_hash: Readable::read(reader)?,
7770 source: Readable::read(reader)?,
7771 onion_routing_packet: Readable::read(reader)?,
7772 skimmed_fee_msat: None,
7773 blinding_point: None,
7775 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7776 payment_preimage: Readable::read(reader)?,
7777 htlc_id: Readable::read(reader)?,
7779 2 => HTLCUpdateAwaitingACK::FailHTLC {
7780 htlc_id: Readable::read(reader)?,
7781 err_packet: Readable::read(reader)?,
7783 _ => return Err(DecodeError::InvalidValue),
7787 let resend_order = match <u8 as Readable>::read(reader)? {
7788 0 => RAACommitmentOrder::CommitmentFirst,
7789 1 => RAACommitmentOrder::RevokeAndACKFirst,
7790 _ => return Err(DecodeError::InvalidValue),
7793 let monitor_pending_channel_ready = Readable::read(reader)?;
7794 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7795 let monitor_pending_commitment_signed = Readable::read(reader)?;
7797 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7798 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7799 for _ in 0..monitor_pending_forwards_count {
7800 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7803 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7804 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7805 for _ in 0..monitor_pending_failures_count {
7806 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7809 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7811 let holding_cell_update_fee = Readable::read(reader)?;
7813 let next_holder_htlc_id = Readable::read(reader)?;
7814 let next_counterparty_htlc_id = Readable::read(reader)?;
7815 let update_time_counter = Readable::read(reader)?;
7816 let feerate_per_kw = Readable::read(reader)?;
7818 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7819 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7820 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7821 // consider the stale state on reload.
7822 match <u8 as Readable>::read(reader)? {
7825 let _: u32 = Readable::read(reader)?;
7826 let _: u64 = Readable::read(reader)?;
7827 let _: Signature = Readable::read(reader)?;
7829 _ => return Err(DecodeError::InvalidValue),
7832 let funding_tx_confirmed_in = Readable::read(reader)?;
7833 let funding_tx_confirmation_height = Readable::read(reader)?;
7834 let short_channel_id = Readable::read(reader)?;
7836 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7837 let holder_dust_limit_satoshis = Readable::read(reader)?;
7838 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7839 let mut counterparty_selected_channel_reserve_satoshis = None;
7841 // Read the old serialization from version 0.0.98.
7842 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7844 // Read the 8 bytes of backwards-compatibility data.
7845 let _dummy: u64 = Readable::read(reader)?;
7847 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7848 let holder_htlc_minimum_msat = Readable::read(reader)?;
7849 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7851 let mut minimum_depth = None;
7853 // Read the old serialization from version 0.0.98.
7854 minimum_depth = Some(Readable::read(reader)?);
7856 // Read the 4 bytes of backwards-compatibility data.
7857 let _dummy: u32 = Readable::read(reader)?;
7860 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7862 1 => Some(CounterpartyForwardingInfo {
7863 fee_base_msat: Readable::read(reader)?,
7864 fee_proportional_millionths: Readable::read(reader)?,
7865 cltv_expiry_delta: Readable::read(reader)?,
7867 _ => return Err(DecodeError::InvalidValue),
7870 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7871 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7873 let counterparty_cur_commitment_point = Readable::read(reader)?;
7875 let counterparty_prev_commitment_point = Readable::read(reader)?;
7876 let counterparty_node_id = Readable::read(reader)?;
7878 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7879 let commitment_secrets = Readable::read(reader)?;
7881 let channel_update_status = Readable::read(reader)?;
7883 #[cfg(any(test, fuzzing))]
7884 let mut historical_inbound_htlc_fulfills = HashSet::new();
7885 #[cfg(any(test, fuzzing))]
7887 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7888 for _ in 0..htlc_fulfills_len {
7889 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7893 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7894 Some((feerate, if channel_parameters.is_outbound_from_holder {
7895 FeeUpdateState::Outbound
7897 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7903 let mut announcement_sigs = None;
7904 let mut target_closing_feerate_sats_per_kw = None;
7905 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7906 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7907 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7908 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7909 // only, so we default to that if none was written.
7910 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7911 let mut channel_creation_height = Some(serialized_height);
7912 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7914 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7915 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7916 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7917 let mut latest_inbound_scid_alias = None;
7918 let mut outbound_scid_alias = None;
7919 let mut channel_pending_event_emitted = None;
7920 let mut channel_ready_event_emitted = None;
7922 let mut user_id_high_opt: Option<u64> = None;
7923 let mut channel_keys_id: Option<[u8; 32]> = None;
7924 let mut temporary_channel_id: Option<ChannelId> = None;
7925 let mut holder_max_accepted_htlcs: Option<u16> = None;
7927 let mut blocked_monitor_updates = Some(Vec::new());
7929 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7930 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7932 let mut is_batch_funding: Option<()> = None;
7934 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7935 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7937 read_tlv_fields!(reader, {
7938 (0, announcement_sigs, option),
7939 (1, minimum_depth, option),
7940 (2, channel_type, option),
7941 (3, counterparty_selected_channel_reserve_satoshis, option),
7942 (4, holder_selected_channel_reserve_satoshis, option),
7943 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7944 (6, holder_max_htlc_value_in_flight_msat, option),
7945 (7, shutdown_scriptpubkey, option),
7946 (8, blocked_monitor_updates, optional_vec),
7947 (9, target_closing_feerate_sats_per_kw, option),
7948 (11, monitor_pending_finalized_fulfills, optional_vec),
7949 (13, channel_creation_height, option),
7950 (15, preimages_opt, optional_vec),
7951 (17, announcement_sigs_state, option),
7952 (19, latest_inbound_scid_alias, option),
7953 (21, outbound_scid_alias, option),
7954 (23, channel_ready_event_emitted, option),
7955 (25, user_id_high_opt, option),
7956 (27, channel_keys_id, option),
7957 (28, holder_max_accepted_htlcs, option),
7958 (29, temporary_channel_id, option),
7959 (31, channel_pending_event_emitted, option),
7960 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7961 (37, holding_cell_skimmed_fees_opt, optional_vec),
7962 (38, is_batch_funding, option),
7963 (39, pending_outbound_blinding_points_opt, optional_vec),
7964 (41, holding_cell_blinding_points_opt, optional_vec),
7967 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7968 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7969 // If we've gotten to the funding stage of the channel, populate the signer with its
7970 // required channel parameters.
7971 if channel_state >= ChannelState::FundingNegotiated {
7972 holder_signer.provide_channel_parameters(&channel_parameters);
7974 (channel_keys_id, holder_signer)
7976 // `keys_data` can be `None` if we had corrupted data.
7977 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7978 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7979 (holder_signer.channel_keys_id(), holder_signer)
7982 if let Some(preimages) = preimages_opt {
7983 let mut iter = preimages.into_iter();
7984 for htlc in pending_outbound_htlcs.iter_mut() {
7986 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7987 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7989 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7990 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7995 // We expect all preimages to be consumed above
7996 if iter.next().is_some() {
7997 return Err(DecodeError::InvalidValue);
8001 let chan_features = channel_type.as_ref().unwrap();
8002 if !chan_features.is_subset(our_supported_features) {
8003 // If the channel was written by a new version and negotiated with features we don't
8004 // understand yet, refuse to read it.
8005 return Err(DecodeError::UnknownRequiredFeature);
8008 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8009 // To account for that, we're proactively setting/overriding the field here.
8010 channel_parameters.channel_type_features = chan_features.clone();
8012 let mut secp_ctx = Secp256k1::new();
8013 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
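// (Randomizing the secp256k1 context re-blinds secret-dependent operations
// as a side-channel hardening measure; the entropy source supplies the
// blinding seed.)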
8015 // `user_id` used to be a single u64 value. In order to remain backwards
8016 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8017 // separate u64 values.
8018 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8020 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8022 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8023 let mut iter = skimmed_fees.into_iter();
8024 for htlc in pending_outbound_htlcs.iter_mut() {
8025 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8027 // We expect all skimmed fees to be consumed above
8028 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8030 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8031 let mut iter = skimmed_fees.into_iter();
8032 for htlc in holding_cell_htlc_updates.iter_mut() {
8033 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8034 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8037 // We expect all skimmed fees to be consumed above
8038 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8040 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8041 let mut iter = blinding_pts.into_iter();
8042 for htlc in pending_outbound_htlcs.iter_mut() {
8043 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8045 // We expect all blinding points to be consumed above
8046 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8048 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8049 let mut iter = blinding_pts.into_iter();
8050 for htlc in holding_cell_htlc_updates.iter_mut() {
8051 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8052 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8055 // We expect all blinding points to be consumed above
8056 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
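// The four blocks above share one rehydration pattern: an optional TLV vec
// written in lockstep with an HTLC list is zipped back element-by-element,
// erroring if the lengths diverge in either direction. Sketched with
// hypothetical names:
//
//     let mut iter = side_vec.into_iter();
//     for htlc in htlcs.iter_mut() {
//         htlc.extra = iter.next().ok_or(DecodeError::InvalidValue)?;
//     }
//     if iter.next().is_some() { return Err(DecodeError::InvalidValue); }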
8060 context: ChannelContext {
8063 config: config.unwrap(),
8067 // Note that we don't care about serializing handshake limits as we only ever serialize
8068 // channel data after the handshake has completed.
8069 inbound_handshake_limits_override: None,
8072 temporary_channel_id,
8074 announcement_sigs_state: announcement_sigs_state.unwrap(),
8076 channel_value_satoshis,
8078 latest_monitor_update_id,
8080 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8081 shutdown_scriptpubkey,
8084 cur_holder_commitment_transaction_number,
8085 cur_counterparty_commitment_transaction_number,
8088 holder_max_accepted_htlcs,
8089 pending_inbound_htlcs,
8090 pending_outbound_htlcs,
8091 holding_cell_htlc_updates,
8095 monitor_pending_channel_ready,
8096 monitor_pending_revoke_and_ack,
8097 monitor_pending_commitment_signed,
8098 monitor_pending_forwards,
8099 monitor_pending_failures,
8100 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8102 signer_pending_commitment_update: false,
8103 signer_pending_funding: false,
8106 holding_cell_update_fee,
8107 next_holder_htlc_id,
8108 next_counterparty_htlc_id,
8109 update_time_counter,
8112 #[cfg(debug_assertions)]
8113 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8114 #[cfg(debug_assertions)]
8115 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8117 last_sent_closing_fee: None,
8118 pending_counterparty_closing_signed: None,
8119 expecting_peer_commitment_signed: false,
8120 closing_fee_limits: None,
8121 target_closing_feerate_sats_per_kw,
8123 funding_tx_confirmed_in,
8124 funding_tx_confirmation_height,
8126 channel_creation_height: channel_creation_height.unwrap(),
8128 counterparty_dust_limit_satoshis,
8129 holder_dust_limit_satoshis,
8130 counterparty_max_htlc_value_in_flight_msat,
8131 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8132 counterparty_selected_channel_reserve_satoshis,
8133 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8134 counterparty_htlc_minimum_msat,
8135 holder_htlc_minimum_msat,
8136 counterparty_max_accepted_htlcs,
8139 counterparty_forwarding_info,
8141 channel_transaction_parameters: channel_parameters,
8142 funding_transaction,
8145 counterparty_cur_commitment_point,
8146 counterparty_prev_commitment_point,
8147 counterparty_node_id,
8149 counterparty_shutdown_scriptpubkey,
8153 channel_update_status,
8154 closing_signed_in_flight: false,
8158 #[cfg(any(test, fuzzing))]
8159 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8160 #[cfg(any(test, fuzzing))]
8161 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8163 workaround_lnd_bug_4006: None,
8164 sent_message_awaiting_response: None,
8166 latest_inbound_scid_alias,
8167 // Later in the ChannelManager deserialization phase, we scan for channels and assign scid aliases where they're missing.
8168 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8170 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8171 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8173 #[cfg(any(test, fuzzing))]
8174 historical_inbound_htlc_fulfills,
8176 channel_type: channel_type.unwrap(),
8179 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8188 use bitcoin::blockdata::constants::ChainHash;
8189 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8190 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8191 use bitcoin::blockdata::opcodes;
8192 use bitcoin::network::constants::Network;
8193 use crate::ln::{PaymentHash, PaymentPreimage};
8194 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8195 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8196 use crate::ln::channel::InitFeatures;
8197 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8198 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8199 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8200 use crate::ln::msgs;
8201 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8202 use crate::ln::script::ShutdownScript;
8203 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8204 use crate::chain::BestBlock;
8205 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8206 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8207 use crate::chain::transaction::OutPoint;
8208 use crate::routing::router::{Path, RouteHop};
8209 use crate::util::config::UserConfig;
8210 use crate::util::errors::APIError;
8211 use crate::util::ser::{ReadableArgs, Writeable};
8212 use crate::util::test_utils;
8213 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8214 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8215 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8216 use bitcoin::secp256k1::{SecretKey,PublicKey};
8217 use bitcoin::hashes::sha256::Hash as Sha256;
8218 use bitcoin::hashes::Hash;
8219 use bitcoin::hashes::hex::FromHex;
8220 use bitcoin::hash_types::WPubkeyHash;
8221 use bitcoin::blockdata::locktime::absolute::LockTime;
8222 use bitcoin::address::{WitnessProgram, WitnessVersion};
8223 use crate::prelude::*;
8225 struct TestFeeEstimator {
8228 impl FeeEstimator for TestFeeEstimator {
8229 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8235 fn test_max_funding_satoshis_no_wumbo() {
8236 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8237 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8238 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8242 signer: InMemorySigner,
8245 impl EntropySource for Keys {
8246 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8249 impl SignerProvider for Keys {
8250 type EcdsaSigner = InMemorySigner;
8252 type TaprootSigner = InMemorySigner;
8254 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8255 self.signer.channel_keys_id()
8258 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8262 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8264 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8265 let secp_ctx = Secp256k1::signing_only();
8266 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8267 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8268 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8271 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8272 let secp_ctx = Secp256k1::signing_only();
8273 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8274 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8278 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8279 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8280 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8284 fn upfront_shutdown_script_incompatibility() {
8285 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8286 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8287 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8290 let seed = [42; 32];
8291 let network = Network::Testnet;
8292 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8293 keys_provider.expect(OnGetShutdownScriptpubkey {
8294 returns: non_v0_segwit_shutdown_script.clone(),
8297 let secp_ctx = Secp256k1::new();
8298 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8299 let config = UserConfig::default();
8300 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8301 Err(APIError::IncompatibleShutdownScript { script }) => {
8302 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8304 Err(e) => panic!("Unexpected error: {:?}", e),
8305 Ok(_) => panic!("Expected error"),
8309 // Check that, during channel creation, we use the same feerate in the open channel message
8310 // as we do in the Channel object creation itself.
8312 fn test_open_channel_msg_fee() {
8313 let original_fee = 253;
8314 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8315 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8316 let secp_ctx = Secp256k1::new();
8317 let seed = [42; 32];
8318 let network = Network::Testnet;
8319 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8321 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8322 let config = UserConfig::default();
8323 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8325 // Now change the fee so we can check that the fee in the open_channel message is the
8326 // same as the old fee.
8327 fee_est.fee_est = 500;
8328 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8329 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8333 fn test_holder_vs_counterparty_dust_limit() {
8334 // Test that when calculating the local and remote commitment transaction fees, the correct
8335 // dust limits are used.
8336 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8337 let secp_ctx = Secp256k1::new();
8338 let seed = [42; 32];
8339 let network = Network::Testnet;
8340 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8341 let logger = test_utils::TestLogger::new();
8342 let best_block = BestBlock::from_network(network);
8344 // Go through the flow of opening a channel between two nodes, making sure
8345 // they have different dust limits.
8347 // Create Node A's channel pointing to Node B's pubkey
8348 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8349 let config = UserConfig::default();
8350 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8352 // Create Node B's channel by receiving Node A's open_channel message
8353 // Make sure A's dust limit is as we expect.
8354 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8355 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8356 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8358 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8359 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8360 accept_channel_msg.dust_limit_satoshis = 546;
8361 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8362 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8364 // Node A --> Node B: funding created
8365 let output_script = node_a_chan.context.get_funding_redeemscript();
8366 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8367 value: 10000000, script_pubkey: output_script.clone(),
8369 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8370 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8371 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8373 // Node B --> Node A: funding signed
8374 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8375 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8377 // Put some inbound and outbound HTLCs in A's channel.
8378 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8379 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8381 amount_msat: htlc_amount_msat,
8382 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8383 cltv_expiry: 300000000,
8384 state: InboundHTLCState::Committed,
8387 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8389 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8390 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8391 cltv_expiry: 200000000,
8392 state: OutboundHTLCState::Committed,
8393 source: HTLCSource::OutboundRoute {
8394 path: Path { hops: Vec::new(), blinded_tail: None },
8395 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8396 first_hop_htlc_msat: 548,
8397 payment_id: PaymentId([42; 32]),
8399 skimmed_fee_msat: None,
8400 blinding_point: None,
8403 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8404 // the dust limit check.
8405 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8406 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8407 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8408 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8410 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8411 // of the HTLCs are seen to be above the dust limit.
8412 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8413 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8414 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8415 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8416 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8420 fn test_timeout_vs_success_htlc_dust_limit() {
8421 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8422 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8423 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8424 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
	#[test]
	fn channel_reestablish_no_updates() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
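		// Per BOLT 2 reestablish semantics: next_commitment_number == 1 means only the initial
		// commitment transaction has been signed, and next_revocation_number == 0 means no
		// revocation has happened yet, which is why the last per-commitment secret is all-zeroes.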
		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
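		// The expectation throughout: `holder_max_htlc_value_in_flight_msat` should equal
		// `channel_value_msat * configured_percent / 100`, with the configured percentage clamped
		// to the [1, 100] range, as the out-of-bounds cases below check.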
		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
	#[test]
	fn test_configured_holder_selected_channel_reserve_satoshis() {
		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

		// Test with valid but unreasonably high channel reserves.
		// The requesting and accepting parties ask for 49%/49% and 60%/30% channel reserves.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

		// Test with a calculated channel reserve less than the lower bound,
		// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
		test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);

		// Test with invalid channel reserves, where the sum of both is greater than or equal
		// to the channel value.
		test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	}
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
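		// E.g. a 2% reserve is expressed as 0.02 * 1_000_000 = 20_000 millionths, making the
		// reserve `channel_value_satoshis * 20_000 / 1_000_000`, floored at
		// `MIN_THEIR_CHAN_RESERVE_SATOSHIS` as asserted below.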
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel negotiation failed: the combined reserves exceed the channel value.
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Make sure that receiving a channel update will update the Channel as expected.
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());
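		// `channel_update` returning true indicates the stored counterparty forwarding info
		// actually changed; re-applying the identical update at the end of this test returns false.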
		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		assert!(!node_a_chan.channel_update(&update).unwrap());
	}
	#[test]
	fn blinding_point_skimmed_fee_ser() {
		// Ensure that channel blinding points and skimmed fees are (de)serialized properly.
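		// (Assumption worth noting: both fields postdate the original channel serialization
		// format, so the encoding is expected to carry them alongside the pre-existing fields;
		// the vectors below deliberately mix Some and None values across HTLCs to exercise that.)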
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let features = channelmanager::provided_init_features(&config);
		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
		let mut chan = Channel { context: outbound_chan.context };

		let dummy_htlc_source = HTLCSource::OutboundRoute {
			path: Path {
				hops: vec![RouteHop {
					pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
					node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
					cltv_expiry_delta: 0, maybe_announced_channel: false,
				}],
				blinded_tail: None
			},
			session_priv: test_utils::privkey(42),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([42; 32]),
		};
		let dummy_outbound_output = OutboundHTLCOutput {
			htlc_id: 0,
			amount_msat: 0,
			payment_hash: PaymentHash([43; 32]),
			cltv_expiry: 0,
			state: OutboundHTLCState::Committed,
			source: dummy_htlc_source.clone(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
		for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
			if idx % 2 == 0 {
				htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
			}
			if idx % 3 == 0 {
				htlc.skimmed_fee_msat = Some(1);
			}
		}
		chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

		let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat: 0,
			cltv_expiry: 0,
			payment_hash: PaymentHash([43; 32]),
			source: dummy_htlc_source.clone(),
			onion_routing_packet: msgs::OnionPacket {
				version: 0,
				public_key: Ok(test_utils::pubkey(1)),
				hop_data: [0; 20*65],
				hmac: [0; 32],
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: PaymentPreimage([42; 32]),
			htlc_id: 0,
		};
		let mut holding_cell_htlc_updates = Vec::with_capacity(10);
		for i in 0..10 {
			if i % 3 == 0 {
				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
			} else if i % 3 == 1 {
				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
			} else {
				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
				if let HTLCUpdateAwaitingACK::AddHTLC {
					ref mut blinding_point, ref mut skimmed_fee_msat, ..
				} = &mut dummy_add {
					*blinding_point = Some(test_utils::pubkey(42 + i));
					*skimmed_fee_msat = Some(42);
				} else { panic!() }
				holding_cell_htlc_updates.push(dummy_add);
			}
		}
		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

		// Encode and decode the channel and ensure that the HTLCs within are the same.
		let encoded_chan = chan.encode();
		let mut s = crate::io::Cursor::new(&encoded_chan);
		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
		let features = channelmanager::provided_channel_type_features(&config);
		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
	}
	#[cfg(feature = "_test_vectors")]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;
		// Test vectors from BOLT 3 Appendices C and F (anchors):
		let feeest = TestFeeEstimator{fee_est: 15000};
		let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();

		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };

		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in by accept_channel

		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
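		// Per BOLT 3 key derivation, each non-revocation key above is
		// `basepoint + SHA256(per_commitment_point || basepoint) * G`, while the revocation key
		// additionally blinds the countersignatory's revocation basepoint with the
		// per-commitment point.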
		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}

		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());

				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}

					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}
		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
						 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
						 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
						 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});
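		// These five HTLCs mirror the BOLT 3 Appendix C setup: received HTLCs of 1_000, 2_000,
		// and 4_000 sat (cltv_expiry 500/501/504) plus offered HTLCs of 2_000 and 3_000 sat
		// (cltv_expiry 502/503), with preimages the repeated-byte arrays hashed above.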
		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;

		test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
						 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

						  { 0,
						  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
						  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

						  { 1,
						  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
						  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

						  { 2,
						  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
						  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

						  { 3,
						  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
						  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

						  { 4,
						  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
						  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
						  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
		// commitment tx with seven outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 647;

		test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
						 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
						 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

						  { 0,
						  "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
						  "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

						  { 1,
						  "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
						  "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

						  { 2,
						  "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
						  "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

						  { 3,
						  "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
						  "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

						  { 4,
						  "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
						  "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
						  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
		} );
});

// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;
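// Why 648 is the six-output boundary: per BOLT 3, a received HTLC is trimmed when
// its value minus the HTLC-success fee falls below the dust limit. For the 1000 sat
// HTLC here: 1000 - (648 * 703 / 1000) = 1000 - 455 = 545 < 546, so it is trimmed,
// while 647 sat/kw would leave exactly 546 and keep it. A minimal sanity sketch of
// that arithmetic (illustrative only, not part of the upstream vectors):
let htlc_success_fee = |feerate_per_kw: u64|
	feerate_per_kw * htlc_success_tx_weight(&ChannelTypeFeatures::only_static_remote_key()) / 1000;
assert_eq!(1000 - htlc_success_fee(647), 546); // exactly at the dust limit: kept
assert!(1000 - htlc_success_fee(648) < 546); // below the dust limit: trimmed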
test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9178 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9181 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9182 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9183 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9186 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9187 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9188 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9191 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9192 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9193 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9196 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9197 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9198 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
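// With anchors_zero_htlc_fee_and_dependencies the second-stage HTLC transactions
// are zero-fee, so trimming compares the HTLC value directly against the dust
// limit: raising the holder's dust limit to 1001 sat is what trims the 1000 sat
// HTLC here, not the feerate.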
test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9208 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9211 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9212 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9213 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9216 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9217 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9218 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9221 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9222 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9223 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9226 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9227 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9228 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
});

// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
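// 2069 sat/kw is the highest feerate that keeps six outputs: the 2000 sat received
// HTLC nets 2000 - (2069 * 703 / 1000) = 2000 - 1454 = 546, exactly the dust limit.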
test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9238 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9241 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9242 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9243 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9246 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9247 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9248 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9251 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9252 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9253 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9256 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9257 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9258 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
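// One sat/kw more than the previous case and the 2000 sat received HTLC nets
// 2000 - 1455 = 545 < 546, so it is trimmed and the commitment drops to five outputs.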
test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9267 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9270 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9271 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9272 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9275 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9276 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9277 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9280 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9281 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9282 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
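// 2194 sat/kw is the last feerate at which the 2000 sat *offered* HTLC survives:
// 2000 - (2194 * 663 / 1000) = 2000 - 1454 = 546 (HTLC-timeout weight is 663,
// versus 703 for HTLC-success, which is why offered HTLCs trim at higher feerates).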
test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9291 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9294 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9295 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9296 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9299 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9300 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9301 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9304 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9305 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9306 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
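// At 2195 sat/kw the offered 2000 sat HTLC nets 2000 - 1455 = 545 < 546 and is
// trimmed, leaving four outputs.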
test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9315 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9318 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9319 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9320 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9323 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9324 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9325 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
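// With zero-fee HTLC transactions, a 2001 sat dust limit trims both 2000 sat HTLCs
// outright, independent of the 2185 sat/kw feerate.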
test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9337 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9340 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9341 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9342 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9345 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9346 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9347 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
});

// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
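// 3702 sat/kw keeps the 3000 sat offered HTLC alive by exactly the dust limit:
// 3000 - (3702 * 663 / 1000) = 3000 - 2454 = 546.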
test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9358 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9361 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9362 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9363 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9366 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9367 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9368 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
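// At 3703 sat/kw the 3000 sat offered HTLC nets 3000 - 2455 = 545 < 546 and is
// trimmed, leaving three outputs.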
test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9377 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9380 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9381 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9382 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
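// A 3001 sat dust limit trims every HTLC worth 3000 sat or less, so only the
// 4000 sat HTLC remains alongside the balance and anchor outputs.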
test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9393 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9396 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9397 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9398 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
});

// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
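// 4914 sat/kw is the boundary for the 4000 sat received HTLC:
// 4000 - (4914 * 703 / 1000) = 4000 - 3454 = 546.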
test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9409 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9412 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9413 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9414 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
});

// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
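// At 4915 sat/kw the last HTLC nets 4000 - 3455 = 545 < 546 and is trimmed,
// leaving only the two balance outputs.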
test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
"30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9424 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4894;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
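// Likewise, under zero-fee HTLC anchors a 4001 sat dust limit trims the 4000 sat
// HTLC without any help from the feerate.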
test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9434 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with two outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651180;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
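// At 9651180 sat/kw the commitment fee consumes almost the entire funder balance:
// to_local is squeezed down to exactly the 546 sat dust limit but still appears.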
test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9444 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with one output untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651181;
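// One sat/kw more pushes to_local below dust; it is trimmed and only the
// counterparty's 3000000 sat to_remote output survives.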
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9452 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// anchors: commitment tx with one output untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 6216010;
chan.context.holder_dust_limit_satoshis = 4001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
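// Under anchors the same squeeze happens at a lower feerate (the commitment tx is
// heavier): to_local is trimmed, and its anchor output disappears along with it.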
test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9462 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with fee greater than funder amount
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 9651936;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type;
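// Here the computed fee exceeds what the funder can pay, so it is capped at the
// funder's full balance: the resulting commitment (and both signatures) are
// identical to the one-output minimum-feerate case above.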
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9472 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
chan.context.feerate_per_kw = 253;
chan.context.pending_inbound_htlcs.clear();
chan.context.pending_inbound_htlcs.push({
let mut out = InboundHTLCOutput{
	htlc_id: 1,
	amount_msat: 2000000,
	cltv_expiry: 501,
	payment_hash: PaymentHash([0; 32]),
	state: InboundHTLCState::Committed,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.clear();
chan.context.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
	htlc_id: 6,
	amount_msat: 5000001,
	cltv_expiry: 506,
	payment_hash: PaymentHash([0; 32]),
	state: OutboundHTLCState::Committed,
	source: HTLCSource::dummy(),
	skimmed_fee_msat: None,
	blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
chan.context.pending_outbound_htlcs.push({
let mut out = OutboundHTLCOutput{
	htlc_id: 5,
	amount_msat: 5000000,
	cltv_expiry: 505,
	payment_hash: PaymentHash([0; 32]),
	state: OutboundHTLCState::Committed,
	source: HTLCSource::dummy(),
	skimmed_fee_msat: None,
	blinding_point: None,
};
out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
out
});
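// Both offered HTLCs round down to the same 5000 sat output value, so BIP 69
// ordering ties on amount and script; BOLT 3 breaks the tie by CLTV expiry,
// which is why the expiry-505 HTLC claims output 1 and the expiry-506 HTLC
// claims output 2 below.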
test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9521 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9524 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9525 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9526 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9528 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9529 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9530 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9532 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9533 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9534 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		                  { 0,
		                  "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		                  "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
		                  { 1,
		                  "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		                  "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
		                  { 2,
		                  "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		                  "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}

	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		           <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		           <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		           <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}
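
	// For reference, a minimal sketch of the BOLT 3 Appendix D derivation exercised above,
	// i.e. what `chan_utils::build_commitment_secret` is expected to compute: starting from
	// the seed, for each of the 48 index bits from most- to least-significant, flip the
	// corresponding bit of the running value and SHA256 it whenever the index bit is set.
	// The helper below is illustrative only and is not part of the channel API.
	#[allow(unused)]
	fn bolt3_commitment_secret_sketch(seed: &[u8; 32], idx: u64) -> [u8; 32] {
		let mut res = *seed;
		for bitpos in (0..48).rev() {
			if idx & (1 << bitpos) != 0 {
				// Flip bit `bitpos` of the running value, then hash it.
				res[bitpos / 8] ^= 1 << (bitpos & 7);
				res = Sha256::hash(&res).to_byte_array();
			}
		}
		// e.g. with the all-ones seed and idx 1, this should reproduce the last vector above.
		res
	}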

	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
		           SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
		           <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
		           SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
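
	// For reference, a sketch of the BOLT 3 Appendix E private-key derivation checked above,
	// i.e. what `chan_utils::derive_private_key` should compute:
	//   derived_secret = base_secret + SHA256(per_commitment_point || base_point) mod n
	// The helper below is illustrative only and is not part of the channel API.
	#[allow(unused)]
	fn bolt3_derive_private_key_sketch<C: secp256k1::Signing>(
		secp_ctx: &Secp256k1<C>, per_commitment_point: &PublicKey, base_secret: &SecretKey,
	) -> SecretKey {
		// tweak = SHA256(per_commitment_point || base_point)
		let mut data = per_commitment_point.serialize().to_vec();
		data.extend_from_slice(&PublicKey::from_secret_key(secp_ctx, base_secret).serialize());
		let tweak = Sha256::hash(&data).to_byte_array();
		// Add the tweak to the base secret, mod the curve order.
		(*base_secret).add_tweak(&secp256k1::Scalar::from_be_bytes(tweak).unwrap())
			.expect("a hash-derived tweak is virtually never out of range")
	}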

	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}

	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
		// need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}

	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, it will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}

	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}

	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
		// channel in a batch before all channels are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000, 100000, 42, &config, 0, 42, None,
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block, &&keys_provider, &&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger, &&keys_provider, chain_hash, &config, 0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider, chain_hash, &config, &best_block, &&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}