// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
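
// In summary, an inbound HTLC normally progresses RemoteAnnounced ->
// AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke -> Committed, and is finally
// resolved via LocalRemoved. This is a summary of the variant docs above, not an additional
// invariant enforced by the type itself.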
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
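
// In summary, an outbound HTLC normally progresses LocalAnnounced -> Committed and, on removal,
// RemoteRemoved -> AwaitingRemoteRevokeToRemove -> AwaitingRemovedRemoteRevoke. As above, this
// is a reading of the variant docs rather than a new invariant.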
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK versions 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r),
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r),
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			fn is_empty(&self) -> bool { self.0 == 0 }

			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
/// We declare all the states/flags here together to help determine which bits are still
/// available to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
366 "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
368 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
369 until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
370 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
371 somewhere and we should pause sending any outbound messages until they've managed to \
372 complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
373 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
374 any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
375 message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
376 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
377 the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
382 "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
383 NegotiatingFundingFlags, [
384 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
385 OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
386 ("Indicates we have received their `open_channel`/`accept_channel` message.",
387 THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
392 "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
393 FUNDED_STATE, AwaitingChannelReadyFlags, [
394 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
395 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
396 THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
397 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
398 `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
399 OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
400 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
401 is being held until all channels in the batch have received `funding_signed` and have \
402 their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
407 "Flags that only apply to [`ChannelState::ChannelReady`].",
408 FUNDED_STATE, ChannelReadyFlags, [
409 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
410 `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
411 messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
412 implicit ACK, so instead we have to hold them away temporarily to be sent later.",
413 AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
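
// Illustrative sketch (not part of upstream): the macro-generated flag types compose with the
// usual bit operators, e.g.:
//
//   let mut flags = ChannelReadyFlags::new();
//   flags |= ChannelReadyFlags::AWAITING_REMOTE_REVOKE;
//   assert!(flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE));
//   flags &= !ChannelReadyFlags::AWAITING_REMOTE_REVOKE;
//   assert!(flags.is_empty());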
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}
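
	// Illustrative sketch (not part of upstream): `to_u32` and `from_u32` are intended to
	// round-trip, which is what (de)serialization of the channel state relies on, e.g.:
	//
	//   let state = ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE);
	//   assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));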
	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
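
// Illustrative sketch (not part of upstream): the accessors generated by `impl_state_flag!`
// read and mutate the flags of whichever funded variant currently holds them, e.g.:
//
//   let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
//   state.set_peer_disconnected();
//   assert!(state.is_peer_disconnected());
//   state.clear_peer_disconnected();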
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
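
// Worked example (illustrative, not part of upstream): an anchors commitment transaction with
// three non-dust HTLCs weighs 1124 + 3 * 172 = 1640 weight units. Assuming the
// `ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies()` constructor:
//
//   let channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
//   let weight = commitment_tx_base_weight(&channel_type) + 3 * COMMITMENT_TX_WEIGHT_PER_HTLC;
//   assert_eq!(weight, 1640);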
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
670 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
671 /// our counterparty or not. However, we don't want to announce updates right away to avoid
672 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
673 /// our channel_update message and track the current state here.
674 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
675 #[derive(Clone, Copy, PartialEq)]
676 pub(super) enum ChannelUpdateStatus {
677 /// We've announced the channel as enabled and are connected to our peer.
679 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
681 /// Our channel is live again, but we haven't announced the channel as enabled yet.
683 /// We've announced the channel as disabled.
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, on the inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// a description of each case.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may be left
/// with no balance, while the fundee is unable to send a payment as the increase in fee more than
/// drains their reserve value. Thus, neither side can send a new HTLC and the channel becomes
/// useless.
/// Therefore, before sending an HTLC when we are the initiator, we check that the feerate can
/// increase by this multiple without hitting this case before we send it.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
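
// Illustrative sketch (not part of upstream): when we are the channel initiator, the
// affordability check before adding an HTLC is conceptually
//
//   let buffered_feerate = feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
//   // ...require that the commitment fee at `buffered_feerate` still fits in our balance
//   // above the channel reserve.
//
// where `feerate_per_kw` stands in for the channel's current feerate field.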
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
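
// Illustrative sketch (not part of upstream): when computing an outbound update_fee, the buffer
// is applied roughly as
//
//   let assumed_htlcs = num_nondust_htlcs + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize;
//   // ...compute the commitment fee as if `assumed_htlcs` HTLCs were present.
//
// where `num_nondust_htlcs` stands in for this module's actual commitment-stats accounting.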
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
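
// Worked out: ~300 s convergence delay / 60 s per tick = 5 ticks, matching the constant above.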
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;
struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}
impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond by the time this counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
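
// Illustrative usage sketch (not part of upstream): a timer-driven caller is expected to do
// something like
//
//   if unfunded_context.should_expire_unfunded_channel() {
//       // force-close and drop this channel's entry
//   }
//
// once per timer tick, since each call both advances the age counter and checks the limit.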
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsState` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,
	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up... this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. I.e., when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// resend it first.
	resend_order: RAACommitmentOrder,
	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (i.e. some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCState` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,
	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// The minimum channel reserve we're required to maintain - set by our counterparty.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,
	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}
	/// Gets the base fee we'd want to charge for forwarding an HTLC out over this channel.
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}
	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
	/// Returns the state of the channel in its various stages of shutdown.
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		match self.channel_state {
			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
					ChannelShutdownState::ShutdownInitiated
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
					ChannelShutdownState::ResolvingHTLCs
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
					ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
			_ => ChannelShutdownState::NotShuttingDown,
		}
	}
1289 fn closing_negotiation_ready(&self) -> bool {
1290 let is_ready_to_close = match self.channel_state {
1291 ChannelState::AwaitingChannelReady(flags) =>
1292 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1293 ChannelState::ChannelReady(flags) =>
1294 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1297 self.pending_inbound_htlcs.is_empty() &&
1298 self.pending_outbound_htlcs.is_empty() &&
1299 self.pending_update_fee.is_none() &&
1303 /// Returns true if this channel is currently available for use. This performs a superset of
1304 /// the checks in is_usable(), additionally considering things like the channel being temporarily disabled.
1305 /// Allowed in any state (including after shutdown)
1306 pub fn is_live(&self) -> bool {
1307 self.is_usable() && !self.channel_state.is_peer_disconnected()
1310 // Public utilities:
1312 pub fn channel_id(&self) -> ChannelId {
1316 /// Returns the `temporary_channel_id` used during channel establishment.
1318 /// Will return `None` for channels created prior to LDK version 0.0.115.
1319 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1320 self.temporary_channel_id
1323 pub fn minimum_depth(&self) -> Option<u32> {
1327 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1328 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1329 pub fn get_user_id(&self) -> u128 {
1333 /// Gets the channel's type
1334 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1338 /// Gets the channel's `short_channel_id`.
1340 /// Will return `None` if the channel hasn't been confirmed yet.
1341 pub fn get_short_channel_id(&self) -> Option<u64> {
1342 self.short_channel_id
1345 /// Allowed in any state (including after shutdown)
1346 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1347 self.latest_inbound_scid_alias
1350 /// Allowed in any state (including after shutdown)
1351 pub fn outbound_scid_alias(&self) -> u64 {
1352 self.outbound_scid_alias
1355 /// Returns the holder signer for this channel.
1357 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1358 return &self.holder_signer
1361 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1362 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases,
1363 /// or prior to any channel actions during `Channel` initialization.
1364 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1365 debug_assert_eq!(self.outbound_scid_alias, 0);
1366 self.outbound_scid_alias = outbound_scid_alias;
1369 /// Returns the funding_txo we either got from our peer, or were given by
1370 /// get_funding_created.
1371 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1372 self.channel_transaction_parameters.funding_outpoint
1375 /// Returns the height in which our funding transaction was confirmed.
1376 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1377 let conf_height = self.funding_tx_confirmation_height;
1378 if conf_height > 0 {
1385 /// Returns the block hash in which our funding transaction was confirmed.
1386 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1387 self.funding_tx_confirmed_in
1390 /// Returns the current number of confirmations on the funding transaction.
1391 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1392 if self.funding_tx_confirmation_height == 0 {
1393 // We either haven't seen any confirmation yet, or observed a reorg.
1397 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
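// For example, if the funding transaction confirmed at height 800_000 and the
// current chain tip is 800_005, this returns 800_005 - 800_000 + 1 = 6
// confirmations; a transaction confirming in the tip block itself has 1.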
1400 fn get_holder_selected_contest_delay(&self) -> u16 {
1401 self.channel_transaction_parameters.holder_selected_contest_delay
1404 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1405 &self.channel_transaction_parameters.holder_pubkeys
1408 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1409 self.channel_transaction_parameters.counterparty_parameters
1410 .as_ref().map(|params| params.selected_contest_delay)
1413 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1414 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1417 /// Allowed in any state (including after shutdown)
1418 pub fn get_counterparty_node_id(&self) -> PublicKey {
1419 self.counterparty_node_id
1422 /// Allowed in any state (including after shutdown)
1423 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1424 self.holder_htlc_minimum_msat
1427 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1428 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1429 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1432 /// Allowed in any state (including after shutdown)
1433 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1434 return cmp::min(
1435 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1436 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1437 // the channel might have been used to route very small values (either by honest users or as DoS).
1438 self.channel_value_satoshis * 1000 * 9 / 10,
1440 self.counterparty_max_htlc_value_in_flight_msat
1444 /// Allowed in any state (including after shutdown)
1445 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1446 self.counterparty_htlc_minimum_msat
1449 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1450 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1451 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1454 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1455 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1456 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1457 cmp::min(
1458 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1459 party_max_htlc_value_in_flight_msat
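// For example (illustrative numbers), on a 1_000_000 sat channel with 10_000 sat
// reserves on each side and a counterparty max-in-flight of 500_000_000 msat:
//   min((1_000_000 - 10_000 - 10_000) * 1000, 500_000_000) = 500_000_000 msat.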
1464 pub fn get_value_satoshis(&self) -> u64 {
1465 self.channel_value_satoshis
1468 pub fn get_fee_proportional_millionths(&self) -> u32 {
1469 self.config.options.forwarding_fee_proportional_millionths
1472 pub fn get_cltv_expiry_delta(&self) -> u16 {
1473 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1476 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1477 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1478 where F::Target: FeeEstimator
1480 match self.config.options.max_dust_htlc_exposure {
1481 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1482 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1483 ConfirmationTarget::OnChainSweep) as u64;
1484 feerate_per_kw.saturating_mul(multiplier)
1486 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
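// For example (illustrative numbers), with
// MaxDustHTLCExposure::FeeRateMultiplier(5000) and an OnChainSweep estimate of
// 2530 sat/kW, the dust exposure cap works out to 2530 * 5000 = 12_650_000 msat.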
1490 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1491 pub fn prev_config(&self) -> Option<ChannelConfig> {
1492 self.prev_config.map(|prev_config| prev_config.0)
1495 // Checks whether we should emit a `ChannelPending` event.
1496 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1497 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1500 // Returns whether we already emitted a `ChannelPending` event.
1501 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1502 self.channel_pending_event_emitted
1505 // Remembers that we already emitted a `ChannelPending` event.
1506 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1507 self.channel_pending_event_emitted = true;
1510 // Checks whether we should emit a `ChannelReady` event.
1511 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1512 self.is_usable() && !self.channel_ready_event_emitted
1515 // Remembers that we already emitted a `ChannelReady` event.
1516 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1517 self.channel_ready_event_emitted = true;
1520 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1521 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1522 /// no longer be considered when forwarding HTLCs.
1523 pub fn maybe_expire_prev_config(&mut self) {
1524 if self.prev_config.is_none() {
1527 let prev_config = self.prev_config.as_mut().unwrap();
1528 prev_config.1 += 1;
1529 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1530 self.prev_config = None;
1534 /// Returns the current [`ChannelConfig`] applied to the channel.
1535 pub fn config(&self) -> ChannelConfig {
1539 /// Updates the channel's config. A bool is returned indicating whether the config update
1540 /// applied resulted in a new ChannelUpdate message.
1541 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1542 let did_channel_update =
1543 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1544 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1545 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1546 if did_channel_update {
1547 self.prev_config = Some((self.config.options, 0));
1548 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1549 // policy change to propagate throughout the network.
1550 self.update_time_counter += 1;
1552 self.config.options = *config;
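// Illustrative usage sketch (hypothetical values, not part of the original
// source): raising the base forwarding fee snapshots the old options and bumps
// the update counter so a fresh channel_update propagates:
//   let mut new_opts = context.config();
//   new_opts.forwarding_fee_base_msat += 100;
//   assert!(context.update_config(&new_opts));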
1556 /// Returns true if funding_signed was sent/received and the
1557 /// funding transaction has been broadcast if necessary.
1558 pub fn is_funding_broadcast(&self) -> bool {
1559 !self.channel_state.is_pre_funded_state() &&
1560 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1563 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1564 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1565 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1566 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1567 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1568 /// an HTLC to it).
1569 /// @local is used only to convert relevant internal structures which refer to remote vs local
1570 /// to decide value of outputs and direction of HTLCs.
1571 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1572 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1573 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1574 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1575 /// which peer generated this transaction and "to whom" this transaction flows.
1577 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1578 where L::Target: Logger
1580 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1581 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1582 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1584 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1585 let mut remote_htlc_total_msat = 0;
1586 let mut local_htlc_total_msat = 0;
1587 let mut value_to_self_msat_offset = 0;
1589 let mut feerate_per_kw = self.feerate_per_kw;
1590 if let Some((feerate, update_state)) = self.pending_update_fee {
1591 if match update_state {
1592 // Note that these match the inclusion criteria when scanning
1593 // pending_inbound_htlcs below.
1594 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1595 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1596 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1598 feerate_per_kw = feerate;
1602 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1603 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1604 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1605 &self.channel_id(),
1606 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1608 macro_rules! get_htlc_in_commitment {
1609 ($htlc: expr, $offered: expr) => {
1610 HTLCOutputInCommitment {
1611 offered: $offered,
1612 amount_msat: $htlc.amount_msat,
1613 cltv_expiry: $htlc.cltv_expiry,
1614 payment_hash: $htlc.payment_hash,
1615 transaction_output_index: None
1620 macro_rules! add_htlc_output {
1621 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1622 if $outbound == local { // "offered HTLC output"
1623 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1624 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1625 0
1626 } else {
1627 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1629 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1630 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1631 included_non_dust_htlcs.push((htlc_in_tx, $source));
1633 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1634 included_dust_htlcs.push((htlc_in_tx, $source));
1637 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1638 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1639 0
1640 } else {
1641 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1643 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1644 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1645 included_non_dust_htlcs.push((htlc_in_tx, $source));
1647 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1648 included_dust_htlcs.push((htlc_in_tx, $source));
1654 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1656 for ref htlc in self.pending_inbound_htlcs.iter() {
1657 let (include, state_name) = match htlc.state {
1658 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1659 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1660 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1661 InboundHTLCState::Committed => (true, "Committed"),
1662 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1665 if include {
1666 add_htlc_output!(htlc, false, None, state_name);
1667 remote_htlc_total_msat += htlc.amount_msat;
1669 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1670 match &htlc.state {
1671 &InboundHTLCState::LocalRemoved(ref reason) => {
1672 if generated_by_local {
1673 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1674 inbound_htlc_preimages.push(preimage);
1675 value_to_self_msat_offset += htlc.amount_msat as i64;
1685 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1687 for ref htlc in self.pending_outbound_htlcs.iter() {
1688 let (include, state_name) = match htlc.state {
1689 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1690 OutboundHTLCState::Committed => (true, "Committed"),
1691 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1692 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1693 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1696 let preimage_opt = match htlc.state {
1697 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1698 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1699 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1700 _ => None,
1703 if let Some(preimage) = preimage_opt {
1704 outbound_htlc_preimages.push(preimage);
1707 if include {
1708 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1709 local_htlc_total_msat += htlc.amount_msat;
1711 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1712 match htlc.state {
1713 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1714 value_to_self_msat_offset -= htlc.amount_msat as i64;
1716 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1717 if !generated_by_local {
1718 value_to_self_msat_offset -= htlc.amount_msat as i64;
1726 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1727 assert!(value_to_self_msat >= 0);
1728 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1729 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1730 // "violate" their reserve value by couting those against it. Thus, we have to convert
1731 // everything to i64 before subtracting as otherwise we can overflow.
1732 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1733 assert!(value_to_remote_msat >= 0);
1735 #[cfg(debug_assertions)]
1737 // Make sure that the to_self/to_remote is always either past the appropriate
1738 // channel_reserve *or* it is making progress towards it.
1739 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1740 self.holder_max_commitment_tx_output.lock().unwrap()
1742 self.counterparty_max_commitment_tx_output.lock().unwrap()
1744 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1745 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1746 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1747 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1750 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1751 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1752 let (value_to_self, value_to_remote) = if self.is_outbound() {
1753 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1755 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
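// For example, on an anchors channel the funder's balance above is reduced by
// the two anchor outputs (2 * 330 sats, assuming LDK's usual
// ANCHOR_OUTPUT_VALUE_SATOSHI of 330) in addition to the total commitment fee.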
1758 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1759 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1760 let (funding_pubkey_a, funding_pubkey_b) = if local {
1761 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1763 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1766 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1767 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1772 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1773 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1778 let num_nondust_htlcs = included_non_dust_htlcs.len();
1780 let channel_parameters =
1781 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1782 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1783 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1790 &mut included_non_dust_htlcs,
1793 let mut htlcs_included = included_non_dust_htlcs;
1794 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1795 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1796 htlcs_included.append(&mut included_dust_htlcs);
1798 // For the stats, trim the to_self/to_remote values to 0 when they fall below the broadcaster's dust limit
1799 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1800 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1808 local_balance_msat: value_to_self_msat as u64,
1809 remote_balance_msat: value_to_remote_msat as u64,
1810 inbound_htlc_preimages,
1811 outbound_htlc_preimages,
1816 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1817 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1818 /// our counterparty!)
1819 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1820 /// TODO: Some type-level magic to enforce this distinction at compile time?
1821 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1822 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1823 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1824 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1825 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1827 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1831 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1832 /// will sign and send to our counterparty.
1833 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1834 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1835 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1836 //may see payments to it!
1837 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1838 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1839 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1841 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1844 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1845 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1846 /// Panics if called before accept_channel/InboundV1Channel::new
1847 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1848 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1851 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1852 &self.get_counterparty_pubkeys().funding_pubkey
1855 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1859 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1860 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1861 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1862 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1863 // more dust balance if the feerate increases when we have several HTLCs pending
1864 // which are near the dust limit.
1865 let mut feerate_per_kw = self.feerate_per_kw;
1866 // If there's a pending update fee, use it to ensure we aren't under-estimating
1867 // potential feerate updates coming soon.
1868 if let Some((feerate, _)) = self.pending_update_fee {
1869 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1871 if let Some(feerate) = outbound_feerate_update {
1872 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1874 cmp::max(2530, feerate_per_kw * 1250 / 1000)
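// For example, at a current feerate of 5000 sat/kW the buffer feerate is
//   max(2530, 5000 * 1250 / 1000) = 6250 sat/kW (the 25% bump dominates),
// while at the 253 sat/kW floor it is max(2530, 316) = 2530 sat/kW (the fixed
// 10 sat/vB bump dominates).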
1877 /// Get forwarding information for the counterparty.
1878 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1879 self.counterparty_forwarding_info.clone()
1882 /// Returns an HTLCStats describing the pending inbound HTLCs
1883 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1885 let mut stats = HTLCStats {
1886 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1887 pending_htlcs_value_msat: 0,
1888 on_counterparty_tx_dust_exposure_msat: 0,
1889 on_holder_tx_dust_exposure_msat: 0,
1890 holding_cell_msat: 0,
1891 on_holder_tx_holding_cell_htlcs_count: 0,
1894 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1897 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1898 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1899 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1901 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1902 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1903 for ref htlc in context.pending_inbound_htlcs.iter() {
1904 stats.pending_htlcs_value_msat += htlc.amount_msat;
1905 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1906 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1908 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1909 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
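// Worked example (illustrative numbers): on a non-anchors channel with a dust
// buffer feerate of 2530 sat/kW, an assumed htlc_timeout_tx_weight of 663 WU,
// and a 546 sat counterparty dust limit, the timeout threshold is
//   2530 * 663 / 1000 + 546 = 1677 + 546 = 2223 sats,
// so any inbound HTLC below ~2223 sats counts toward counterparty dust exposure.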
1915 /// Returns an HTLCStats describing the pending outbound HTLCs, *including* pending adds in our holding cell.
1916 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1918 let mut stats = HTLCStats {
1919 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1920 pending_htlcs_value_msat: 0,
1921 on_counterparty_tx_dust_exposure_msat: 0,
1922 on_holder_tx_dust_exposure_msat: 0,
1923 holding_cell_msat: 0,
1924 on_holder_tx_holding_cell_htlcs_count: 0,
1927 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1930 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1931 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1932 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1934 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1935 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1936 for ref htlc in context.pending_outbound_htlcs.iter() {
1937 stats.pending_htlcs_value_msat += htlc.amount_msat;
1938 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1939 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1941 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1942 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1946 for update in context.holding_cell_htlc_updates.iter() {
1947 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1948 stats.pending_htlcs += 1;
1949 stats.pending_htlcs_value_msat += amount_msat;
1950 stats.holding_cell_msat += amount_msat;
1951 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1952 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1954 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1955 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1957 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1964 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1965 /// Doesn't bother handling the
1966 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1967 /// corner case properly.
1968 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1969 -> AvailableBalances
1970 where F::Target: FeeEstimator
1972 let context = &self;
1973 // Note that we have to handle overflow due to the above case.
1974 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1975 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1977 let mut balance_msat = context.value_to_self_msat;
1978 for ref htlc in context.pending_inbound_htlcs.iter() {
1979 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1980 balance_msat += htlc.amount_msat;
1983 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1985 let outbound_capacity_msat = context.value_to_self_msat
1986 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1988 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1990 let mut available_capacity_msat = outbound_capacity_msat;
1992 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1993 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1997 if context.is_outbound() {
1998 // We should mind channel commit tx fee when computing how much of the available capacity
1999 // can be used in the next htlc. Mirrors the logic in send_htlc.
2001 // The fee depends on whether the amount we will be sending is above dust or not,
2002 // and the answer will in turn change the amount itself, making it a circular dependency.
2004 // This complicates the computation around dust-values, up to the one-htlc-value.
2005 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2006 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2007 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2010 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2011 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2012 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2013 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2014 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2015 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2016 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2019 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2020 // value ends up being below dust, we have this fee available again. In that case,
2021 // match the value to right-below-dust.
2022 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2023 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2024 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2025 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2026 debug_assert!(one_htlc_difference_msat != 0);
2027 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2028 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2029 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2031 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2034 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2035 // sending a new HTLC won't reduce their balance below our reserve threshold.
2036 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2037 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2038 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2041 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2042 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2044 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2045 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2046 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2048 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2049 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2050 // we've selected for them, we can only send dust HTLCs.
2051 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2055 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2057 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2058 // between zero and the remaining dust exposure limit OR above the dust limit.
2059 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2060 // send above the dust limit (as the router can always overpay to meet the dust limit).
2061 let mut remaining_msat_below_dust_exposure_limit = None;
2062 let mut dust_exposure_dust_limit_msat = 0;
2063 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2065 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2066 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2068 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2069 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2070 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2072 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2073 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2074 remaining_msat_below_dust_exposure_limit =
2075 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2076 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2079 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2080 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2081 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2082 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2083 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2084 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2087 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2088 if available_capacity_msat < dust_exposure_dust_limit_msat {
2089 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2091 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2095 available_capacity_msat = cmp::min(available_capacity_msat,
2096 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2098 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2099 available_capacity_msat = 0;
2103 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2104 - context.value_to_self_msat as i64
2105 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2106 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2108 outbound_capacity_msat,
2109 next_outbound_htlc_limit_msat: available_capacity_msat,
2110 next_outbound_htlc_minimum_msat,
2115 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2116 let context = &self;
2117 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2120 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2121 /// number of pending HTLCs that are on track to be in our next commitment tx.
2123 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2124 /// `fee_spike_buffer_htlc` is `Some`.
2126 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2127 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2129 /// Dust HTLCs are excluded.
2130 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2131 let context = &self;
2132 assert!(context.is_outbound());
2134 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2137 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2138 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2140 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2141 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2143 let mut addl_htlcs = 0;
2144 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2145 match htlc.origin {
2146 HTLCInitiator::LocalOffered => {
2147 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2148 addl_htlcs += 1;
2151 HTLCInitiator::RemoteOffered => {
2152 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2153 addl_htlcs += 1;
2158 let mut included_htlcs = 0;
2159 for ref htlc in context.pending_inbound_htlcs.iter() {
2160 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2161 continue
2163 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2164 // transaction including this HTLC if it times out before they RAA.
2165 included_htlcs += 1;
2168 for ref htlc in context.pending_outbound_htlcs.iter() {
2169 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2170 continue
2172 match htlc.state {
2173 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2174 OutboundHTLCState::Committed => included_htlcs += 1,
2175 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2176 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2177 // transaction won't be generated until they send us their next RAA, which will mean
2178 // dropping any HTLCs in this state.
2183 for htlc in context.holding_cell_htlc_updates.iter() {
2184 match htlc {
2185 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2186 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2187 continue
2189 included_htlcs += 1
2191 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2192 // ack we're guaranteed to never include them in commitment txs anymore.
2196 let num_htlcs = included_htlcs + addl_htlcs;
2197 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2198 #[cfg(any(test, fuzzing))]
2199 {
2200 let mut fee = res;
2201 if fee_spike_buffer_htlc.is_some() {
2202 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2204 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2205 + context.holding_cell_htlc_updates.len();
2206 let commitment_tx_info = CommitmentTxInfoCached {
2208 total_pending_htlcs,
2209 next_holder_htlc_id: match htlc.origin {
2210 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2211 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2213 next_counterparty_htlc_id: match htlc.origin {
2214 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2215 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2217 feerate: context.feerate_per_kw,
2219 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2224 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2225 /// pending HTLCs that are on track to be in their next commitment tx
2227 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2228 /// `fee_spike_buffer_htlc` is `Some`.
2230 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2231 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2233 /// Dust HTLCs are excluded.
2234 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2235 let context = &self;
2236 assert!(!context.is_outbound());
2238 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2241 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2242 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2244 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2245 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2247 let mut addl_htlcs = 0;
2248 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2249 match htlc.origin {
2250 HTLCInitiator::LocalOffered => {
2251 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2252 addl_htlcs += 1;
2255 HTLCInitiator::RemoteOffered => {
2256 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2257 addl_htlcs += 1;
2262 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2263 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2264 // committed outbound HTLCs, see below.
2265 let mut included_htlcs = 0;
2266 for ref htlc in context.pending_inbound_htlcs.iter() {
2267 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2268 continue
2270 included_htlcs += 1;
2273 for ref htlc in context.pending_outbound_htlcs.iter() {
2274 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2275 continue
2277 // We only include outbound HTLCs if they will not be included in their next commitment_signed,
2278 // i.e. if they've responded to us with an RAA after announcement.
2279 match htlc.state {
2280 OutboundHTLCState::Committed => included_htlcs += 1,
2281 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2282 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2287 let num_htlcs = included_htlcs + addl_htlcs;
2288 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2289 #[cfg(any(test, fuzzing))]
2290 {
2291 let mut fee = res;
2292 if fee_spike_buffer_htlc.is_some() {
2293 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2295 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2296 let commitment_tx_info = CommitmentTxInfoCached {
2298 total_pending_htlcs,
2299 next_holder_htlc_id: match htlc.origin {
2300 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2301 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2303 next_counterparty_htlc_id: match htlc.origin {
2304 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2305 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2307 feerate: context.feerate_per_kw,
2309 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2314 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2315 where F: Fn() -> Option<O> {
2316 match self.channel_state {
2317 ChannelState::FundingNegotiated => f(),
2318 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2327 /// Returns the transaction if there is a pending funding transaction that is yet to be
2329 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2330 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2333 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2335 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2336 self.if_unbroadcasted_funding(||
2337 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2341 /// Returns whether the channel is funded in a batch.
2342 pub fn is_batch_funding(&self) -> bool {
2343 self.is_batch_funding.is_some()
2346 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2348 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2349 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2352 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2353 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2354 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2355 /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2356 /// immediately (others we will have to allow to time out).
2357 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2358 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2359 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2360 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2361 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2362 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2364 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2365 // return them to fail the payment.
2366 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2367 let counterparty_node_id = self.get_counterparty_node_id();
2368 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2370 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2371 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2376 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2377 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2378 // returning a channel monitor update here would imply a channel monitor update before
2379 // we even registered the channel monitor to begin with, which is invalid.
2380 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2381 // funding transaction, don't return a funding txo (which prevents providing the
2382 // monitor update to the user, even if we return one).
2383 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2384 let generate_monitor_update = match self.channel_state {
2385 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2388 if generate_monitor_update {
2389 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2390 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2391 update_id: self.latest_monitor_update_id,
2392 counterparty_node_id: Some(self.counterparty_node_id),
2393 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2397 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2399 self.channel_state = ChannelState::ShutdownComplete;
2400 self.update_time_counter += 1;
2403 dropped_outbound_htlcs,
2404 unbroadcasted_batch_funding_txid,
2405 channel_id: self.channel_id,
2406 counterparty_node_id: self.counterparty_node_id,
2410 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2411 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2412 let counterparty_keys = self.build_remote_transaction_keys();
2413 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2415 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2416 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2417 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2418 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2420 match &self.holder_signer {
2421 // TODO (arik): move match into calling method for Taproot
2422 ChannelSignerType::Ecdsa(ecdsa) => {
2423 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2424 .map(|(signature, _)| msgs::FundingSigned {
2425 channel_id: self.channel_id(),
2428 partial_signature_with_nonce: None,
2432 if funding_signed.is_none() {
2433 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2434 self.signer_pending_funding = true;
2435 } else if self.signer_pending_funding {
2436 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2437 self.signer_pending_funding = false;
2440 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2441 (counterparty_initial_commitment_tx, funding_signed)
2443 // TODO (taproot|arik)
2450 // Internal utility functions for channels
2452 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2453 /// `channel_value_satoshis` in msat, set through
2454 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2456 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2458 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2459 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2460 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2462 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2465 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2467 channel_value_satoshis * 10 * configured_percent
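// For example, with a 1_000_000 sat channel and a configured 10%:
//   1_000_000 * 10 * 10 = 100_000_000 msat,
// i.e. 100_000 sats, which is indeed 10% of the channel value.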
2470 /// Returns a minimum channel reserve value the remote needs to maintain,
2471 /// required by us according to the configured or default
2472 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2474 /// Guaranteed to return a value no larger than channel_value_satoshis
2476 /// This is used both for outbound and inbound channels and has lower bound
2477 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2478 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2479 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2480 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
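// For example (illustrative numbers), on a 1_000_000 sat channel with
// their_channel_reserve_proportional_millionths = 10_000 (1%):
//   calculated_reserve = 1_000_000 * 10_000 / 1_000_000 = 10_000 sats,
// which sits between the MIN_THEIR_CHAN_RESERVE_SATOSHIS floor and the channel
// value, so the returned reserve is 10_000 sats.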
2483 /// This is for legacy reasons, present for forward-compatibility.
2484 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2485 /// from storage. Hence, we use this function to avoid persisting default values of
2486 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2487 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2488 let (q, _) = channel_value_satoshis.overflowing_div(100);
2489 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2492 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2493 // Note that num_htlcs should not include dust HTLCs.
2495 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2496 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
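// For example, assuming the non-anchors base weight of 724 WU and 172 WU per
// non-dust HTLC, a commitment carrying two such HTLCs at 2530 sat/kW costs
//   (724 + 2 * 172) * 2530 / 1000 = 1068 * 2530 / 1000 = 2702 sats.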
2499 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2500 // Note that num_htlcs should not include dust HTLCs.
2501 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2502 // Note that we need to divide before multiplying to round properly,
2503 // since the lowest denomination of bitcoin on-chain is the satoshi.
2504 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
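// Continuing the example above (1068 WU at 2530 sat/kW), the sat-rounding
// division happens before the final multiplication:
//   1068 * 2530 / 1000 = 2702 sats, then * 1000 = 2_702_000 msat,
// so the msat result is always a whole number of satoshis, as intended.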
2507 // Holder designates channel data owned for the benefit of the user client.
2508 // Counterparty designates channel data owned by the other channel participant.
2509 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2510 pub context: ChannelContext<SP>,
2513 #[cfg(any(test, fuzzing))]
2514 struct CommitmentTxInfoCached {
2516 total_pending_htlcs: usize,
2517 next_holder_htlc_id: u64,
2518 next_counterparty_htlc_id: u64,
2522 impl<SP: Deref> Channel<SP> where
2523 SP::Target: SignerProvider,
2524 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2526 fn check_remote_fee<F: Deref, L: Deref>(
2527 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2528 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2529 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2531 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2532 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2534 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2536 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2537 if feerate_per_kw < lower_limit {
2538 if let Some(cur_feerate) = cur_feerate_per_kw {
2539 if feerate_per_kw > cur_feerate {
2540 log_warn!(logger,
2541 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2542 cur_feerate, feerate_per_kw);
2546 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
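
	// Illustrative sketch of the check above using a stubbed estimator. `ConstFeeEstimator`
	// is hypothetical and simply returns 1_000 sat/kW for every confirmation target; the
	// point is that a remote feerate below our lower bound is rejected unless it improves on
	// the feerate we already have committed while winding the channel down.
	#[cfg(test)]
	#[allow(dead_code)]
	fn check_remote_fee_example() {
		struct ConstFeeEstimator;
		impl FeeEstimator for ConstFeeEstimator {
			fn get_est_sat_per_1000_weight(&self, _target: ConfirmationTarget) -> u32 { 1_000 }
		}
		let lower_limit = LowerBoundedFeeEstimator::new(&ConstFeeEstimator)
			.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
		// 900 sat/kW is below our 1_000 sat/kW floor, so it would normally be rejected...
		assert!(900 < lower_limit);
		// ...but per the logic above it is still accepted if our current feerate is even
		// lower, e.g. 800 sat/kW, since the update moves us toward something that confirms.
		assert!(900 > 800);
	}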
	fn get_closing_scriptpubkey(&self) -> ScriptBuf {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will panic on the `unwrap` below.
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	}
	fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
		let mut ret =
		(4 +  // version
		 1 +  // input count
		 36 + // prevout
		 1 +  // script length (0)
		 4 +  // sequence
		 1 +  // output count
		 4    // lock time
		 )*4 +                                                 // * 4 for non-witness parts
		2 +                                                    // witness marker and flag
		1 +                                                    // witness element count
		4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
		self.context.get_funding_redeemscript().len() as u64 + // funding witness script
		2*(1 + 71);                                            // two signatures + sighash type flags
		if let Some(spk) = a_scriptpubkey {
			ret += ((8+1) +            // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		if let Some(spk) = b_scriptpubkey {
			ret += ((8+1) +            // output values and script length
				spk.len() as u64) * 4; // scriptpubkey and witness multiplier
		}
		ret
	}
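
	// Worked check of the weight formula above (illustrative): with the 71-byte 2-of-2
	// funding witness script and both parties closing to 22-byte P2WPKH scripts, the
	// closing tx weighs (4+1+36+1+4+1+4)*4 + 2 + 1 + 4 + 71 + 2*(1+71) + 2*((8+1)+22)*4
	// = 674 WU.
	#[cfg(test)]
	#[allow(dead_code)]
	fn closing_tx_weight_example() {
		let base_weight = (4u64 + 1 + 36 + 1 + 4 + 1 + 4) * 4 + 2 + 1 + 4 + 71 + 2 * (1 + 71);
		let p2wpkh_output_weight = ((8u64 + 1) + 22) * 4;
		assert_eq!(base_weight + 2 * p2wpkh_output_weight, 674);
	}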
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
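
	// Illustrative numbers for the balance logic above: on a 100_000 sat channel where we
	// are the outbound funder holding 60_000_000 msat and propose a 1_000 sat closing fee,
	// the fee comes entirely out of our side. All values are made up for the example.
	#[cfg(test)]
	#[allow(dead_code)]
	fn closing_balance_example() {
		let channel_value_satoshis = 100_000u64;
		let value_to_self_msat = 60_000_000u64;
		let proposed_total_fee_satoshis = 1_000i64;
		let value_to_holder = value_to_self_msat as i64 / 1000 - proposed_total_fee_satoshis;
		let value_to_counterparty = (channel_value_satoshis * 1000 - value_to_self_msat) as i64 / 1000;
		assert_eq!(value_to_holder, 59_000);
		assert_eq!(value_to_counterparty, 40_000);
		// With a 546 sat dust limit (a common value, assumed here) neither output is
		// trimmed; a balance at or below the limit would instead be zeroed and go to fees.
		assert!(value_to_holder as u64 > 546 && value_to_counterparty as u64 > 546);
	}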
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer
	/// is disconnected).
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see the equivalent if condition there).
		assert!(self.context.channel_state.should_force_holding_cell());
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
		}
	}
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (since we wouldn't have accepted an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call
		// us at all, so we panic if we're not in an operational state.
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					},
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}
		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if self.context.channel_state.should_force_holding_cell() {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though it's
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {},
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
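
	// Minimal sketch (hypothetical helper, not LDK API) of the update_id juggling described
	// in the comments above: a preimage update that must "fly before" already-blocked
	// updates takes over the first blocked update's id, and every blocked update is shifted
	// up by one so that ids remain strictly increasing.
	#[cfg(test)]
	#[allow(dead_code)]
	fn renumber_for_early_update(new_update_id: &mut u64, blocked_update_ids: &mut Vec<u64>) {
		if let Some(first_blocked) = blocked_update_ids.first().copied() {
			// The new update slots in where the first blocked update used to be...
			*new_update_id = first_blocked;
			// ...and everything already queued moves one id later.
			for id in blocked_update_ids.iter_mut() {
				*id += 1;
			}
		}
	}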
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
	-> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
	-> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Ok(None);
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					},
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}

		if self.context.channel_state.should_force_holding_cell() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {},
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
				htlc_id: htlc_id_arg,
				err_packet,
			});
			return Ok(None);
		}

		log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
		}

		Ok(Some(msgs::UpdateFailHTLC {
			channel_id: self.context.channel_id(),
			htlc_id: htlc_id_arg,
			reason: err_packet,
		}))
	}
	// Message handlers:

	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures
	/// message to send.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		let mut check_reconnection = false;
		match &self.context.channel_state {
			ChannelState::AwaitingChannelReady(flags) => {
				let flags = *flags & !FundedStateFlags::ALL;
				debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
					// If we reconnected before sending our `channel_ready` they may still resend theirs.
					check_reconnection = true;
				} else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
					self.context.channel_state.set_their_channel_ready();
				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
		if check_reconnection {
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
						&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
					).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}

		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
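
	// Illustrative sketch of the re-derivation above: once the counterparty has revoked old
	// states, their "first" per-commitment point is just the public key of the revealed
	// secret, so a re-sent channel_ready must carry exactly that point. The secret below is
	// an arbitrary stand-in, not a real per-commitment secret.
	#[cfg(test)]
	#[allow(dead_code)]
	fn rederive_commitment_point_example() {
		let secp_ctx = Secp256k1::new();
		let revealed_secret = [0x42u8; 32]; // stand-in for a stored, revoked per-commitment secret
		let secret_key = SecretKey::from_slice(&revealed_secret).expect("non-zero and below the curve order");
		let expected_point = PublicKey::from_secret_key(&secp_ctx, &secret_key);
		// Derivation is deterministic: any other point in a re-sent channel_ready is a
		// protocol violation and the channel is closed.
		assert_eq!(expected_point, PublicKey::from_secret_key(&secp_ctx, &secret_key));
	}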
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they are no longer present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}
		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		{
			// Check that the remote can afford to pay for this HTLC on-chain at the current
			// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}

		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are not as sensitive
			// to fee spikes, so the buffer is only scaled up below when anchors are not in use.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}

		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});
		Ok(())
	}
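
	// Worked example of the dust-exposure gate above (all numbers assumed for illustration):
	// with a 3_000 sat/kW dust buffer feerate, a 663 WU non-anchor HTLC-timeout tx, and a
	// 546 sat counterparty dust limit, any HTLC below 663 * 3_000 / 1_000 + 546 = 2_535 sats
	// is dust on their commitment tx and counts in full against the configured exposure cap.
	#[cfg(test)]
	#[allow(dead_code)]
	fn dust_exposure_example() {
		let dust_buffer_feerate = 3_000u64;
		let htlc_timeout_weight = 663u64; // assumed non-anchor htlc_timeout_tx_weight
		let counterparty_dust_limit_satoshis = 546u64;
		let exposure_dust_limit_timeout_sats =
			dust_buffer_feerate * htlc_timeout_weight / 1000 + counterparty_dust_limit_satoshis;
		assert_eq!(exposure_dust_limit_timeout_sats, 2_535);
		// A 2_000 sat HTLC falls under the threshold, so all 2_000_000 msat of it counts as
		// dust exposure; crossing the cap fails the HTLC back with 0x1000|7 instead of
		// accepting it.
		assert!(2_000 < exposure_dust_limit_timeout_sats);
	}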
	/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed
	/// message as removed.
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					},
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
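
	// Small sketch of the preimage check above: an update_fulfill_htlc is only acceptable
	// if the SHA-256 of the supplied preimage equals the HTLC's payment hash. Values are
	// arbitrary.
	#[cfg(test)]
	#[allow(dead_code)]
	fn preimage_matches_hash_example() {
		let payment_preimage = PaymentPreimage([7u8; 32]);
		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
		// Recomputing the hash from the preimage must reproduce the stored payment hash; a
		// mismatch is treated as a protocol violation and closes the channel.
		assert_eq!(payment_hash, PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array()));
	}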
	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
	}

	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}

	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
	where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
					}
				}
			}
		}
		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}
		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}
		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}

		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// process of claiming the payment.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}],
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.expecting_peer_commitment_signed = false;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
		if self.context.channel_state.is_monitor_update_in_progress() {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
			self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
	/// Frees any pending commitment updates in the holding cell, generating the relevant messages
	/// for our counterparty.
	fn free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		assert!(!self.context.channel_state.is_monitor_update_in_progress());
		if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
			log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
				if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());

			let mut monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: Vec::new(),
			};

			let mut htlc_updates = Vec::new();
			mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
			let mut update_add_count = 0;
			let mut update_fulfill_count = 0;
			let mut update_fail_count = 0;
			let mut htlcs_to_fail = Vec::new();
			for htlc_update in htlc_updates.drain(..) {
				// Note that this *can* fail, though it should be due to rather-rare conditions on
				// fee races with adding too many outputs which push our total payments just over
				// the limit. In case it's less rare than I anticipate, we may want to revisit
				// handling this case better and maybe fulfilling some of the HTLCs while attempting
				// to rebalance channels.
				match &htlc_update {
					&HTLCUpdateAwaitingACK::AddHTLC {
						amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
						skimmed_fee_msat, blinding_point, ..
					} => {
						match self.send_htlc(
							amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
							false, skimmed_fee_msat, blinding_point, fee_estimator, logger
						) {
							Ok(_) => update_add_count += 1,
							Err(e) => {
								match e {
									ChannelError::Ignore(ref msg) => {
										log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
										// If we fail to send here, then this HTLC should
										// be failed backwards. Failing to send here
										// indicates that this HTLC may keep being put back
										// into the holding cell without ever being
										// successfully forwarded/failed/fulfilled, causing
										// our counterparty to eventually close on us.
										htlcs_to_fail.push((source.clone(), *payment_hash));
									},
									_ => {
										panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
									},
								}
							},
						}
					},
					&HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
						// If an HTLC claim was previously added to the holding cell (via
						// `get_update_fulfill_htlc`), then generating the claim message itself must
						// not fail - any in between attempts to claim the HTLC will have resulted
						// in it hitting the holding cell again and we cannot change the state of a
						// holding cell HTLC from fulfill to anything else.
						let mut additional_monitor_update =
							if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
								self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
							{ monitor_update } else { unreachable!() };
						update_fulfill_count += 1;
						monitor_update.updates.append(&mut additional_monitor_update.updates);
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
						match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
							Ok(update_fail_msg_option) => {
								// If an HTLC failure was previously added to the holding cell (via
								// `queue_fail_htlc`) then generating the fail message itself must
								// not fail - we should never end up in a state where we double-fail
								// an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
								// for a full revocation before failing.
								debug_assert!(update_fail_msg_option.is_some());
								update_fail_count += 1;
							},
							Err(e) => {
								if let ChannelError::Ignore(_) = e {}
								else {
									panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
								}
							},
						}
					},
				}
			}
			if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
				return (None, htlcs_to_fail);
			}
			let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
				self.send_update_fee(feerate, false, fee_estimator, logger)
			} else {
				None
			};

			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
			// but we want them to be strictly increasing by one, so reset it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);

			log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
				&self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
				update_add_count, update_fulfill_count, update_fail_count);

			self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
			(self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
		} else {
			(None, Vec::new())
		}
	}
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if !self.context.channel_state.is_awaiting_remote_revoke() {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}
3637 #[cfg(any(test, fuzzing))]
3639 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3640 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3643 match &self.context.holder_signer {
3644 ChannelSignerType::Ecdsa(ecdsa) => {
3645 ecdsa.validate_counterparty_revocation(
3646 self.context.cur_counterparty_commitment_transaction_number + 1,
3648 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3650 // TODO (taproot|arik)
3655 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3656 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3657 self.context.latest_monitor_update_id += 1;
3658 let mut monitor_update = ChannelMonitorUpdate {
3659 update_id: self.context.latest_monitor_update_id,
3660 counterparty_node_id: Some(self.context.counterparty_node_id),
3661 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3662 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3663 secret: msg.per_commitment_secret,
3667 // Update state now that we've passed all the can-fail calls...
3668 // (note that we may still fail to generate the new commitment_signed message, but that's
3669 // OK, we step the channel here and *then* if the new generation fails we can fail the
3670 // channel based on that, but stepping stuff here should be safe either way.
3671 self.context.channel_state.clear_awaiting_remote_revoke();
3672 self.context.sent_message_awaiting_response = None;
3673 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3674 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3675 self.context.cur_counterparty_commitment_transaction_number -= 1;
3677 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3678 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3681 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3682 let mut to_forward_infos = Vec::new();
3683 let mut revoked_htlcs = Vec::new();
3684 let mut finalized_claimed_htlcs = Vec::new();
3685 let mut update_fail_htlcs = Vec::new();
3686 let mut update_fail_malformed_htlcs = Vec::new();
3687 let mut require_commitment = false;
3688 let mut value_to_self_msat_diff: i64 = 0;
3691 // Take references explicitly so that we can hold multiple references to self.context.
3692 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3693 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3694 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3696 // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
3697 pending_inbound_htlcs.retain(|htlc| {
3698 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3699 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3700 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3701 value_to_self_msat_diff += htlc.amount_msat as i64;
3703 *expecting_peer_commitment_signed = true;
3707 pending_outbound_htlcs.retain(|htlc| {
3708 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3709 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3710 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3711 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3713 finalized_claimed_htlcs.push(htlc.source.clone());
3714 // They fulfilled, so we sent them money
3715 value_to_self_msat_diff -= htlc.amount_msat as i64;
3720 for htlc in pending_inbound_htlcs.iter_mut() {
3721 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3723 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3727 let mut state = InboundHTLCState::Committed;
3728 mem::swap(&mut state, &mut htlc.state);
3730 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3731 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3732 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3733 require_commitment = true;
3734 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3735 match forward_info {
3736 PendingHTLCStatus::Fail(fail_msg) => {
3737 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3738 require_commitment = true;
3740 HTLCFailureMsg::Relay(msg) => {
3741 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3742 update_fail_htlcs.push(msg)
3744 HTLCFailureMsg::Malformed(msg) => {
3745 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3746 update_fail_malformed_htlcs.push(msg)
3750 PendingHTLCStatus::Forward(forward_info) => {
3751 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3752 to_forward_infos.push((forward_info, htlc.htlc_id));
3753 htlc.state = InboundHTLCState::Committed;
3759 for htlc in pending_outbound_htlcs.iter_mut() {
3760 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3761 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3762 htlc.state = OutboundHTLCState::Committed;
3763 *expecting_peer_commitment_signed = true;
3765 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3766 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3767 // Grab the preimage, if it exists, instead of cloning
3768 let mut reason = OutboundHTLCOutcome::Success(None);
3769 mem::swap(outcome, &mut reason);
3770 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3771 require_commitment = true;
3775 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3777 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3778 match update_state {
3779 FeeUpdateState::Outbound => {
3780 debug_assert!(self.context.is_outbound());
3781 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3782 self.context.feerate_per_kw = feerate;
3783 self.context.pending_update_fee = None;
3784 self.context.expecting_peer_commitment_signed = true;
3786 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3787 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3788 debug_assert!(!self.context.is_outbound());
3789 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3790 require_commitment = true;
3791 self.context.feerate_per_kw = feerate;
3792 self.context.pending_update_fee = None;
3797 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3798 let release_state_str =
3799 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3800 macro_rules! return_with_htlcs_to_fail {
3801 ($htlcs_to_fail: expr) => {
3802 if !release_monitor {
3803 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3804 update: monitor_update,
3806 return Ok(($htlcs_to_fail, None));
3808 return Ok(($htlcs_to_fail, Some(monitor_update)));
3813 if self.context.channel_state.is_monitor_update_in_progress() {
3814 // We can't actually generate a new commitment transaction (incl by freeing holding
3815 // cells) while we can't update the monitor, so we just return what we have.
3816 if require_commitment {
3817 self.context.monitor_pending_commitment_signed = true;
3818 // When the monitor updating is restored we'll call
3819 // get_last_commitment_update_for_send(), which does not update state, but we're
3820 // definitely now awaiting a remote revoke before we can step forward any more, so
3822 let mut additional_update = self.build_commitment_no_status_check(logger);
3823 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3824 // strictly increasing by one, so decrement it here.
3825 self.context.latest_monitor_update_id = monitor_update.update_id;
3826 monitor_update.updates.append(&mut additional_update.updates);
3828 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3829 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3830 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3831 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3832 return_with_htlcs_to_fail!(Vec::new());
3835 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3836 (Some(mut additional_update), htlcs_to_fail) => {
3837 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3838 // strictly increasing by one, so decrement it here.
3839 self.context.latest_monitor_update_id = monitor_update.update_id;
3840 monitor_update.updates.append(&mut additional_update.updates);
3842 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3843 &self.context.channel_id(), release_state_str);
3845 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3846 return_with_htlcs_to_fail!(htlcs_to_fail);
3848 (None, htlcs_to_fail) => {
3849 if require_commitment {
3850 let mut additional_update = self.build_commitment_no_status_check(logger);
3852 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3853 // strictly increasing by one, so decrement it here.
3854 self.context.latest_monitor_update_id = monitor_update.update_id;
3855 monitor_update.updates.append(&mut additional_update.updates);
3857 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3858 &self.context.channel_id(),
3859 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3862 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3863 return_with_htlcs_to_fail!(htlcs_to_fail);
3865 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3866 &self.context.channel_id(), release_state_str);
3868 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3869 return_with_htlcs_to_fail!(htlcs_to_fail);
	/// Queues up an outbound update fee by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
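	///
	/// Illustrative sketch (hypothetical call site; assumes an outbound, usable channel):
	/// ```ignore
	/// channel.queue_update_fee(2530 /* sat per 1000 weight */, &fee_estimator, &logger);
	/// // The update_fee + commitment_signed pair goes out when the holding cell is freed.
	/// ```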
	pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
		assert!(msg_opt.is_none(), "We forced holding cell?");
	}
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on why the return value is an `Option`.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
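	///
	/// Affordability sketch (illustrative only; the helpers below are the ones used in the
	/// body, but the variable names and numbers here are hypothetical):
	/// ```ignore
	/// // At the proposed feerate, budget for every non-dust HTLC, everything in the holding
	/// // cell, plus CONCURRENT_INBOUND_HTLC_FEE_BUFFER extra slots the peer may still add:
	/// let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw,
	/// 	nondust_htlcs + holding_cell_htlcs + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize,
	/// 	channel_type) * 1000;
	/// // The update is dropped unless our balance covers the buffered fee plus their reserve:
	/// let affordable = holder_balance_msat >= buffer_fee_msat + counterparty_reserve_sats * 1000;
	/// ```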
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
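	///
	/// Disconnection sketch (illustrative only; roughly what the `ChannelManager` does when a
	/// peer disconnects):
	/// ```ignore
	/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
	/// 	// Still pre-funding: just force-shutdown and forget the channel.
	/// }
	/// ```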
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_pre_funded_state() {
			return Err(())
		}

		if self.context.channel_state.is_peer_disconnected() {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state.set_peer_disconnected();
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
	/// update completes (potentially immediately).
	/// The messages which were generated with the monitor update must *not* have been sent to the
	/// remote end, and must instead have been dropped. They will be regenerated when
	/// [`Self::monitor_updating_restored`] is called.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	/// [`chain::Watch`]: crate::chain::Watch
	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
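	///
	/// Internal pause/resume flow sketch (illustrative only; `config` and `height` are
	/// hypothetical stand-ins for the caller's values):
	/// ```ignore
	/// self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
	/// // ... once the ChannelMonitorUpdate has been durably persisted ...
	/// let updates = self.monitor_updating_restored(&logger, &node_signer, chain_hash, &config, height);
	/// // `updates` carries the channel_ready/RAA/commitment_signed we withheld while paused.
	/// ```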
	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.context.monitor_pending_revoke_and_ack |= resend_raa;
		self.context.monitor_pending_commitment_signed |= resend_commitment;
		self.context.monitor_pending_channel_ready |= resend_channel_ready;
		self.context.monitor_pending_forwards.append(&mut pending_forwards);
		self.context.monitor_pending_failures.append(&mut pending_fails);
		self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.context.channel_state.set_monitor_update_in_progress();
	}
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		assert!(self.context.channel_state.is_monitor_update_in_progress());
		self.context.channel_state.clear_monitor_update_in_progress();

		// If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() &&
				matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
				matches!(self.context.channel_state, ChannelState::ChannelReady(_))
			{
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		if self.context.channel_state.is_peer_disconnected() {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
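	/// Handles an inbound `update_fee` from the funder, failing the channel if the feerate is
	/// unacceptable or would push our dust exposure past the configured maximum. Illustrative
	/// call-site sketch (hypothetical; message handling lives in the `ChannelManager` layer):
	/// ```ignore
	/// channel.update_fee(&fee_estimator, &update_fee_msg, &logger)?;
	/// // The new feerate only takes effect once the corresponding commitment dance completes.
	/// ```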
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// Check that we won't be pushed over our dust exposure limit by the feerate increase.
		if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
		let commitment_update = if self.context.signer_pending_commitment_update {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
			self.context.get_funding_signed_msg(logger).1
		} else { None };
		let channel_ready = if funding_signed.is_some() {
			self.check_get_channel_ready(0)
		} else { None };

		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
			if commitment_update.is_some() { "a" } else { "no" },
			if funding_signed.is_some() { "a" } else { "no" },
			if channel_ready.is_some() { "a" } else { "no" });

		SignerResumeUpdates {
			commitment_update,
			funding_signed,
			channel_ready,
		}
	}
	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
		msgs::RevokeAndACK {
			channel_id: self.context.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
	/// Gets the last commitment update for immediate sending to our peer.
	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();

		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
					skimmed_fee_msat: htlc.skimmed_fee_msat,
					blinding_point: htlc.blinding_point,
				});
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.context.channel_id(),
				feerate_per_kw: self.context.pending_update_fee.unwrap().0,
			})
		} else { None };

		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
			&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
			if self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = false;
			}
			update
		} else {
			if !self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = true;
			}
			return Err(());
		};
		Ok(msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed,
		})
	}
	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state.is_local_shutdown_sent() {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}
	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
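	///
	/// Reconnection sketch (illustrative only, not upstream code):
	/// ```ignore
	/// let responses = channel.channel_reestablish(&msg, &logger, &node_signer, chain_hash, &config, &best_block)?;
	/// // Re-send channel_ready/shutdown_msg if present, then the RAA and commitment update
	/// // in the order given by `responses.order`.
	/// ```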
	pub fn channel_reestablish<L: Deref, NS: Deref>(
		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
	) -> Result<ReestablishResponses, ChannelError>
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		if !self.context.channel_state.is_peer_disconnected() {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
		}

		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
		if msg.next_remote_commitment_number > 0 {
			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
			}
			if msg.next_remote_commitment_number > our_commitment_transaction {
				macro_rules! log_and_panic {
					($err_msg: expr) => {
						log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
						panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					}
				}
				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number, if yes we send a warning message.
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(ChannelError::Warn(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.context.channel_state.clear_peer_disconnected();
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);

		if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if !self.context.channel_state.is_our_channel_ready() ||
					self.context.channel_state.is_monitor_update_in_progress() {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.context.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				}),
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			debug_assert!(false, "All values should have been handled in the four cases above");
			return Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
		if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
			self.mark_awaiting_response();
		}
		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };

		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
			}

			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: None,
				order: self.context.resend_order.clone(),
			})
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
			}

			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None,
					order: self.context.resend_order.clone(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
					order: self.context.resend_order.clone(),
				})
			}
		} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		} else {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		}
	}
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
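	///
	/// Worked example (hypothetical numbers): with a closing transaction weight of 700 WU, a
	/// minimum feerate of 253 sat/kW yields a minimum fee of `253 * 700 / 1000 = 177` sat;
	/// with a normal feerate of 2000 sat/kW and `force_close_avoidance_max_fee_satoshis` of
	/// 1000, the maximum (as funder) is `2000 * 700 / 1000 + 1000 = 2400` sat.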
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// a very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.context.closing_negotiation_ready()
	}
	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
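	///
	/// Timer-hook sketch (illustrative only):
	/// ```ignore
	/// // Called roughly once per minute from the node's timer:
	/// if let Err(e) = channel.timer_check_closing_negotiation_progress() {
	/// 	// Two ticks without progress: force-close, using `e`'s message as the reason.
	/// }
	/// ```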
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.context.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.context.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}
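	/// Kicks off `closing_signed` negotiation when ready: as the funder, proposes the initial
	/// fee; otherwise, processes any queued counterparty `closing_signed`. Illustrative
	/// call-site sketch (hypothetical):
	/// ```ignore
	/// let (msg_opt, tx_opt, shutdown_opt) =
	/// 	channel.maybe_propose_closing_signed(&fee_estimator, &logger)?;
	/// // msg_opt: a closing_signed to send; tx_opt: a fully-signed closing tx to broadcast.
	/// ```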
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
		// that closing_negotiation_ready checks this case (as well as a few others).
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
		if self.context.expecting_peer_commitment_signed {
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// a reconnection.
	fn mark_awaiting_response(&mut self) {
		self.context.sent_message_awaiting_response = Some(0);
	}

	/// Determines whether we should disconnect the counterparty due to not receiving a response
	/// within our expected timeframe.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
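	///
	/// Illustrative sketch (hypothetical):
	/// ```ignore
	/// if channel.should_disconnect_peer_awaiting_response() {
	/// 	// Disconnect; the subsequent reconnect triggers a fresh channel_reestablish dance.
	/// }
	/// ```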
	pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
		let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
			ticks_elapsed
		} else {
			// Don't disconnect when we're not waiting on a response.
			return false;
		};
		*ticks_elapsed += 1;
		*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	}
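	/// Handles a counterparty `shutdown` message: validates the script, echoes our own
	/// `shutdown` back if we haven't sent one, and drains the holding cell. Illustrative
	/// call-site sketch (hypothetical):
	/// ```ignore
	/// let (shutdown_opt, monitor_update_opt, dropped_htlcs) =
	/// 	channel.shutdown(&signer_provider, &their_features, &msg)?;
	/// for (source, payment_hash) in dropped_htlcs { /* fail the payment backwards */ }
	/// ```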
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_pre_funded_state() {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state.set_remote_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
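	/// Handles a counterparty `closing_signed`: verifies their signature against the implied
	/// closing transaction, accepts a matching fee (returning a broadcastable tx) or
	/// counter-proposes within our computed fee limits. Illustrative sketch (hypothetical;
	/// `broadcaster` is an assumed `BroadcasterInterface` in scope):
	/// ```ignore
	/// let (counter_msg, signed_tx_opt, shutdown_opt) = channel.closing_signed(&fee_estimator, &msg)?;
	/// if let Some(tx) = signed_tx_opt { broadcaster.broadcast_transactions(&[&tx]); }
	/// ```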
4772 pub fn closing_signed<F: Deref>(
4773 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4774 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4775 where F::Target: FeeEstimator
4777 if !self.context.channel_state.is_both_sides_shutdown() {
4778 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4780 if self.context.channel_state.is_peer_disconnected() {
4781 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4783 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4784 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4786 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4787 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4790 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4791 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4794 if self.context.channel_state.is_monitor_update_in_progress() {
4795 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4796 return Ok((None, None, None));
4799 let funding_redeemscript = self.context.get_funding_redeemscript();
4800 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4801 if used_total_fee != msg.fee_satoshis {
4802 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4804 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4806 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4809 // The remote end may have decided to revoke their output due to inconsistent dust
4810 // limits, so check for that case by re-checking the signature here.
4811 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4812 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4813 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4817 for outp in closing_tx.trust().built_transaction().output.iter() {
4818 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4819 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4823 assert!(self.context.shutdown_scriptpubkey.is_some());
4824 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4825 if last_fee == msg.fee_satoshis {
4826 let shutdown_result = ShutdownResult {
4827 monitor_update: None,
4828 dropped_outbound_htlcs: Vec::new(),
4829 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4830 channel_id: self.context.channel_id,
4831 counterparty_node_id: self.context.counterparty_node_id,
4833 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4834 self.context.channel_state = ChannelState::ShutdownComplete;
4835 self.context.update_time_counter += 1;
4836 return Ok((None, Some(tx), Some(shutdown_result)));
4840 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4842 macro_rules! propose_fee {
4843 ($new_fee: expr) => {
4844 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4845 (closing_tx, $new_fee)
4847 self.build_closing_transaction($new_fee, false)
4850 return match &self.context.holder_signer {
4851 ChannelSignerType::Ecdsa(ecdsa) => {
4853 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4854 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4855 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4856 let shutdown_result = ShutdownResult {
4857 monitor_update: None,
4858 dropped_outbound_htlcs: Vec::new(),
4859 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4860 channel_id: self.context.channel_id,
4861 counterparty_node_id: self.context.counterparty_node_id,
4863 self.context.channel_state = ChannelState::ShutdownComplete;
4864 self.context.update_time_counter += 1;
4865 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4866 (Some(tx), Some(shutdown_result))
4871 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4872 Ok((Some(msgs::ClosingSigned {
4873 channel_id: self.context.channel_id,
4874 fee_satoshis: used_fee,
4876 fee_range: Some(msgs::ClosingSignedFeeRange {
4877 min_fee_satoshis: our_min_fee,
4878 max_fee_satoshis: our_max_fee,
4880 }), signed_tx, shutdown_result))
4882 // TODO (taproot|arik)
4889 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4890 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4891 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4893 if max_fee_satoshis < our_min_fee {
4894 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4896 if min_fee_satoshis > our_max_fee {
4897 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4900 if !self.context.is_outbound() {
4901 // They have to pay, so pick the highest fee in the overlapping range.
4902 // We should never set an upper bound aside from their full balance
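// `(value_to_self_msat + 999) / 1000` is ceiling division: it rounds our msat balance
// up to whole sats, leaving their balance (rounded down) as the upper fee bound.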
4903 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4904 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4906 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4907 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4908 msg.fee_satoshis, our_min_fee, our_max_fee)));
4910 // The proposed fee is in our acceptable range, accept it and broadcast!
4911 propose_fee!(msg.fee_satoshis);
4914 // Old fee style negotiation. We don't bother to enforce whether they are complying
4915 // with the "making progress" requirements, we just comply and hope for the best.
4916 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4917 if msg.fee_satoshis > last_fee {
4918 if msg.fee_satoshis < our_max_fee {
4919 propose_fee!(msg.fee_satoshis);
4920 } else if last_fee < our_max_fee {
4921 propose_fee!(our_max_fee);
4923 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4926 if msg.fee_satoshis > our_min_fee {
4927 propose_fee!(msg.fee_satoshis);
4928 } else if last_fee > our_min_fee {
4929 propose_fee!(our_min_fee);
4931 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4935 if msg.fee_satoshis < our_min_fee {
4936 propose_fee!(our_min_fee);
4937 } else if msg.fee_satoshis > our_max_fee {
4938 propose_fee!(our_max_fee);
4940 propose_fee!(msg.fee_satoshis);
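// Taken together, the three branches above are roughly equivalent to clamping the
// peer's proposal into our acceptable range (illustrative restatement, assuming
// our_min_fee <= our_max_fee as computed above):
//
// propose_fee!(msg.fee_satoshis.clamp(our_min_fee, our_max_fee));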
4946 fn internal_htlc_satisfies_config(
4947 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4948 ) -> Result<(), (&'static str, u16)> {
4949 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4950 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4951 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4952 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4954 "Prior hop has deviated from specified fee parameters or origin node has obsolete ones",
4955 0x1000 | 12, // fee_insufficient
4958 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4960 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4961 0x1000 | 13, // incorrect_cltv_expiry
4967 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4968 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4969 /// unsuccessful, falls back to the previous one if one exists.
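/// For example (purely illustrative values): with a proportional fee of 100 ppm, a base
/// fee of 1_000 msat, and a cltv_expiry_delta of 40, forwarding 1_000_000 msat requires
/// the inbound HTLC to carry at least 1_001_100 msat and to expire at least 40 blocks
/// after the outgoing CLTV.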
4970 pub fn htlc_satisfies_config(
4971 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4972 ) -> Result<(), (&'static str, u16)> {
4973 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4975 if let Some(prev_config) = self.context.prev_config() {
4976 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4983 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4984 self.context.cur_holder_commitment_transaction_number + 1
4987 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4988 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
4991 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4992 self.context.cur_counterparty_commitment_transaction_number + 2
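// Commitment "transaction numbers" count down from INITIAL_COMMITMENT_NUMBER (2^48 - 1)
// as commitments are exchanged, so the small offsets above step back from the internal
// counters to the still-current (+ 1) or already-revoked (+ 2) commitment.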
4996 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
4997 &self.context.holder_signer
5001 pub fn get_value_stat(&self) -> ChannelValueStat {
5003 value_to_self_msat: self.context.value_to_self_msat,
5004 channel_value_msat: self.context.channel_value_satoshis * 1000,
5005 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5006 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|h| h.amount_msat).sum::<u64>(),
5007 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|h| h.amount_msat).sum::<u64>(),
5008 holding_cell_outbound_amount_msat: {
5010 for h in self.context.holding_cell_htlc_updates.iter() {
5012 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5020 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5021 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5025 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5026 /// Allowed in any state (including after shutdown)
5027 pub fn is_awaiting_monitor_update(&self) -> bool {
5028 self.context.channel_state.is_monitor_update_in_progress()
5031 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5032 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5033 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
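// Otherwise everything up to (but not including) the first blocked update has been
// released, e.g. if the first blocked entry carries update_id 5, ids 1..=4 are
// in flight and we return 4.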
5034 self.context.blocked_monitor_updates[0].update.update_id - 1
5037 /// Returns the next blocked monitor update, if one exists, and a bool which indicates whether
5038 /// a further blocked monitor update exists after the next.
5039 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5040 if self.context.blocked_monitor_updates.is_empty() { return None; }
5041 Some((self.context.blocked_monitor_updates.remove(0).update,
5042 !self.context.blocked_monitor_updates.is_empty()))
5045 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5046 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5047 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5048 -> Option<ChannelMonitorUpdate> {
5049 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5050 if !release_monitor {
5051 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5060 pub fn blocked_monitor_updates_pending(&self) -> usize {
5061 self.context.blocked_monitor_updates.len()
5064 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5065 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5066 /// transaction. If the channel is inbound, this implies simply that the channel has not
5067 /// advanced state.
5068 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5069 if !self.is_awaiting_monitor_update() { return false; }
5071 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5072 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5074 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5075 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5076 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5079 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5080 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5081 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5082 // waiting for the initial monitor persistence. Thus, we check if our commitment
5083 // transaction numbers have both been iterated only exactly once (for the
5084 // funding_signed), and we're awaiting monitor update.
5086 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5087 // only way to get an awaiting-monitor-update state during initial funding is if the
5088 // initial monitor persistence is still pending).
5090 // Because deciding we're awaiting initial broadcast spuriously could result in
5091 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5092 // we hard-assert here, even in production builds.
5093 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5094 assert!(self.context.monitor_pending_channel_ready);
5095 assert_eq!(self.context.latest_monitor_update_id, 0);
5101 /// Returns true if our channel_ready has been sent
5102 pub fn is_our_channel_ready(&self) -> bool {
5103 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5104 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5107 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5108 pub fn received_shutdown(&self) -> bool {
5109 self.context.channel_state.is_remote_shutdown_sent()
5112 /// Returns true if we either initiated or agreed to shut down the channel.
5113 pub fn sent_shutdown(&self) -> bool {
5114 self.context.channel_state.is_local_shutdown_sent()
5117 /// Returns true if this channel is fully shut down. True here implies that no further actions
5118 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5119 /// will be handled appropriately by the chain monitor.
5120 pub fn is_shutdown(&self) -> bool {
5121 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5124 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5125 self.context.channel_update_status
5128 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5129 self.context.update_time_counter += 1;
5130 self.context.channel_update_status = status;
5133 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5134 // Called:
5135 // * always when a new block/transactions are confirmed with the new height
5136 // * when funding is signed with a height of 0
5137 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5141 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
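// The + 1 counts the confirming block itself: a funding tx confirmed at height H has
// exactly one confirmation when the tip is at H (so H = 100 with a tip of 105 gives 6).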
5142 if funding_tx_confirmations <= 0 {
5143 self.context.funding_tx_confirmation_height = 0;
5146 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5150 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5151 // channel_ready yet.
5152 if self.context.signer_pending_funding {
5156 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5157 // channel_ready until the entire batch is ready.
5158 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5159 self.context.channel_state.set_our_channel_ready();
5161 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5162 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5163 self.context.update_time_counter += 1;
5165 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5166 // We got a reorg but not enough to trigger a force close, just ignore.
5169 if self.context.funding_tx_confirmation_height != 0 &&
5170 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5172 // We should never see a funding transaction on-chain until we've received
5173 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5174 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5175 // however, may do this and we shouldn't treat it as a bug.
5176 #[cfg(not(fuzzing))]
5177 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5178 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5179 self.context.channel_state.to_u32());
5181 // We got a reorg but not enough to trigger a force close, just ignore.
5185 if need_commitment_update {
5186 if !self.context.channel_state.is_monitor_update_in_progress() {
5187 if !self.context.channel_state.is_peer_disconnected() {
5188 let next_per_commitment_point =
5189 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5190 return Some(msgs::ChannelReady {
5191 channel_id: self.context.channel_id,
5192 next_per_commitment_point,
5193 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5197 self.context.monitor_pending_channel_ready = true;
5203 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5204 /// In the first case, we store the confirmation height and calculate the short channel id.
5205 /// In the second, we simply return an Err indicating we need to be force-closed now.
5206 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5207 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5208 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5209 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5211 NS::Target: NodeSigner,
5214 let mut msgs = (None, None);
5215 if let Some(funding_txo) = self.context.get_funding_txo() {
5216 for &(index_in_block, tx) in txdata.iter() {
5217 // Check if the transaction is the expected funding transaction, and if it is,
5218 // check that it pays the right amount to the right script.
5219 if self.context.funding_tx_confirmation_height == 0 {
5220 if tx.txid() == funding_txo.txid {
5221 let txo_idx = funding_txo.index as usize;
5222 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5223 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5224 if self.context.is_outbound() {
5225 // If we generated the funding transaction and it doesn't match what it
5226 // should, the client is really broken and we should just panic and
5227 // tell them off. That said, because hash collisions happen with high
5228 // probability in fuzzing mode, if we're fuzzing we just close the
5229 // channel and move on.
5230 #[cfg(not(fuzzing))]
5231 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5233 self.context.update_time_counter += 1;
5234 let err_reason = "funding tx had wrong script/value or output index";
5235 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5237 if self.context.is_outbound() {
5238 if !tx.is_coin_base() {
5239 for input in tx.input.iter() {
5240 if input.witness.is_empty() {
5241 // We generated a malleable funding transaction, implying we've
5242 // just exposed ourselves to funds loss to our counterparty.
5243 #[cfg(not(fuzzing))]
5244 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5249 self.context.funding_tx_confirmation_height = height;
5250 self.context.funding_tx_confirmed_in = Some(*block_hash);
5251 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5252 Ok(scid) => Some(scid),
5253 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5256 // If this is a coinbase transaction and not a 0-conf channel
5257 // we should update our min_depth to 100 to handle coinbase maturity
5258 if tx.is_coin_base() &&
5259 self.context.minimum_depth.unwrap_or(0) > 0 &&
5260 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5261 self.context.minimum_depth = Some(COINBASE_MATURITY);
5264 // If we allow 1-conf funding, we may need to check for channel_ready here and
5265 // send it immediately instead of waiting for a best_block_updated call (which
5266 // may have already happened for this block).
5267 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5268 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5269 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5270 msgs = (Some(channel_ready), announcement_sigs);
5273 for inp in tx.input.iter() {
5274 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5275 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5276 return Err(ClosureReason::CommitmentTxConfirmed);
5284 /// When a new block is connected, we check the height of the block against outbound holding
5285 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5286 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5287 /// handled by the ChannelMonitor.
5289 /// If we return Err, the channel may have been closed, at which point the standard
5290 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5291 /// post-shutdown.
5293 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5294 /// accordingly.
5295 pub fn best_block_updated<NS: Deref, L: Deref>(
5296 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5297 node_signer: &NS, user_config: &UserConfig, logger: &L
5298 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5300 NS::Target: NodeSigner,
5303 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5306 fn do_best_block_updated<NS: Deref, L: Deref>(
5307 &mut self, height: u32, highest_header_time: u32,
5308 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5309 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5311 NS::Target: NodeSigner,
5314 let mut timed_out_htlcs = Vec::new();
5315 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5316 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5318 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
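// E.g., with the chain tip at height 800_000 and a grace period of 3 blocks (the
// constant's actual value lives in channelmonitor.rs; 3 is illustrative here), any held
// HTLC with cltv_expiry <= 800_003 is failed back rather than forwarded.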
5319 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5321 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5322 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5323 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5331 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5333 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5334 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5335 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5337 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5338 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5341 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5342 self.context.channel_state.is_our_channel_ready() {
5343 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5344 if self.context.funding_tx_confirmation_height == 0 {
5345 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5346 // zero if it has been reorged out, however in either case, our state flags
5347 // indicate we've already sent a channel_ready
5348 funding_tx_confirmations = 0;
5351 // If we've sent channel_ready (or have both sent and received channel_ready), and
5352 // the funding transaction has become unconfirmed,
5353 // close the channel and hope we can get the latest state on chain (because presumably
5354 // the funding transaction is at least still in the mempool of most nodes).
5356 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5357 // 0-conf channel, but not doing so may lead to the
5358 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5359 // to.
5360 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5361 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5362 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5363 return Err(ClosureReason::ProcessingError { err: err_reason });
5365 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5366 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5367 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5368 // If funding_tx_confirmed_in is unset, the channel must not be active
5369 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5370 assert!(!self.context.channel_state.is_our_channel_ready());
5371 return Err(ClosureReason::FundingTimedOut);
5374 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5375 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5377 Ok((None, timed_out_htlcs, announcement_sigs))
5380 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5381 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5382 /// before the channel has reached channel_ready and we can just wait for more blocks.
5383 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5384 if self.context.funding_tx_confirmation_height != 0 {
5385 // We handle the funding disconnection by calling best_block_updated with a height one
5386 // below where our funding was connected, implying a reorg back to conf_height - 1.
5387 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5388 // We use the time field to bump the current time we set on channel updates if it's
5389 // larger. If we don't know that time has moved forward, we can just set it to the last
5390 // time we saw and it will be ignored.
5391 let best_time = self.context.update_time_counter;
5392 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5393 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5394 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5395 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5396 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5402 // We never learned about the funding confirmation anyway, just ignore
5407 // Methods to get unprompted messages to send to the remote end (or where we already returned
5408 // something in the handler for the message that prompted this message):
5410 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5411 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5412 /// directions). Should be used for both broadcasted announcements and in response to an
5413 /// AnnouncementSignatures message from the remote peer.
5415 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5416 /// closing).
5418 /// This will only return ChannelError::Ignore upon failure.
5420 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5421 fn get_channel_announcement<NS: Deref>(
5422 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5423 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5424 if !self.context.config.announced_channel {
5425 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5427 if !self.context.is_usable() {
5428 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5431 let short_channel_id = self.context.get_short_channel_id()
5432 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5433 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5434 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5435 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5436 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5438 let msg = msgs::UnsignedChannelAnnouncement {
5439 features: channelmanager::provided_channel_features(&user_config),
5442 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5443 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5444 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5445 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5446 excess_data: Vec::new(),
5452 fn get_announcement_sigs<NS: Deref, L: Deref>(
5453 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5454 best_block_height: u32, logger: &L
5455 ) -> Option<msgs::AnnouncementSignatures>
5457 NS::Target: NodeSigner,
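// Note the check below is the inverse of "at least 6 confirmations": confirmations =
// best_block_height - funding_tx_confirmation_height + 1, so requiring six of them is
// exactly requiring funding_tx_confirmation_height + 5 <= best_block_height.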
5460 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5464 if !self.context.is_usable() {
5468 if self.context.channel_state.is_peer_disconnected() {
5469 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5473 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5477 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5478 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5481 log_trace!(logger, "{:?}", e);
5485 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5487 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5492 match &self.context.holder_signer {
5493 ChannelSignerType::Ecdsa(ecdsa) => {
5494 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5496 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5501 let short_channel_id = match self.context.get_short_channel_id() {
5503 None => return None,
5506 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5508 Some(msgs::AnnouncementSignatures {
5509 channel_id: self.context.channel_id(),
5511 node_signature: our_node_sig,
5512 bitcoin_signature: our_bitcoin_sig,
5515 // TODO (taproot|arik)
5521 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5522 /// available.
5523 fn sign_channel_announcement<NS: Deref>(
5524 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5525 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5526 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5527 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5528 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5529 let were_node_one = announcement.node_id_1 == our_node_key;
5531 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5532 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5533 match &self.context.holder_signer {
5534 ChannelSignerType::Ecdsa(ecdsa) => {
5535 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5536 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5537 Ok(msgs::ChannelAnnouncement {
5538 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5539 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5540 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5541 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5542 contents: announcement,
5545 // TODO (taproot|arik)
5550 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5554 /// Processes an incoming announcement_signatures message, providing a fully-signed
5555 /// channel_announcement message which we can broadcast and storing our counterparty's
5556 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5557 pub fn announcement_signatures<NS: Deref>(
5558 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5559 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5560 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5561 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5563 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5565 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5566 return Err(ChannelError::Close(format!(
5567 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5568 &announcement, self.context.get_counterparty_node_id())));
5570 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5571 return Err(ChannelError::Close(format!(
5572 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5573 &announcement, self.context.counterparty_funding_pubkey())));
5576 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5577 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5578 return Err(ChannelError::Ignore(
5579 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5582 self.sign_channel_announcement(node_signer, announcement)
5585 /// Gets a signed channel_announcement for this channel, if we previously received an
5586 /// announcement_signatures from our counterparty.
5587 pub fn get_signed_channel_announcement<NS: Deref>(
5588 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5589 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5590 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5593 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5595 Err(_) => return None,
5597 match self.sign_channel_announcement(node_signer, announcement) {
5598 Ok(res) => Some(res),
5603 /// May panic if called on a channel that wasn't immediately-previously
5604 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5605 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5606 assert!(self.context.channel_state.is_peer_disconnected());
5607 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5608 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5609 // current to_remote balances. However, it no longer has any use, and thus is now simply
5610 // set to a dummy (but valid, as required by the spec) public key.
5611 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5612 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5613 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5614 let mut pk = [2; 33]; pk[1] = 0xff;
5615 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5616 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5617 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5618 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5621 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5624 self.mark_awaiting_response();
5625 msgs::ChannelReestablish {
5626 channel_id: self.context.channel_id(),
5627 // The protocol has two different commitment number concepts - the "commitment
5628 // transaction number", which starts from 0 and counts up, and the "revocation key
5629 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5630 // commitment transaction numbers by the index which will be used to reveal the
5631 // revocation key for that commitment transaction, which means we have to convert them
5632 // to protocol-level commitment numbers here...
5634 // next_local_commitment_number is the next commitment_signed number we expect to
5635 // receive (indicating if they need to resend one that we missed).
5636 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5637 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5638 // receive, however we track it by the next commitment number for a remote transaction
5639 // (which is one further, as they always revoke previous commitment transaction, not
5640 // the one we send) so we have to decrement by 1. Note that if
5641 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5642 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5644 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
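// E.g., fresh off funding_signed (one commitment each, nothing yet revoked) both
// counters sit at INITIAL_COMMITMENT_NUMBER - 1, so we send next_local_commitment_number
// = 1 and next_remote_commitment_number = 0, as the spec requires for an unused channel.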
5645 your_last_per_commitment_secret: remote_last_secret,
5646 my_current_per_commitment_point: dummy_pubkey,
5647 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5648 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5649 // txid of that interactive transaction, else we MUST NOT set it.
5650 next_funding_txid: None,
5655 // Send stuff to our remote peers:
5657 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5658 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5659 /// commitment update.
5661 /// `Err`s will only be [`ChannelError::Ignore`].
5662 pub fn queue_add_htlc<F: Deref, L: Deref>(
5663 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5664 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5665 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5666 ) -> Result<(), ChannelError>
5667 where F::Target: FeeEstimator, L::Target: Logger
5670 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5671 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5672 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5674 if let ChannelError::Ignore(_) = err { /* fine */ }
5675 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5680 /// Adds a pending outbound HTLC to this channel; note that you probably want
5681 /// [`Self::send_htlc_and_commit`] instead, as you'll want both messages at once.
5683 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5684 /// the wire:
5685 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5686 ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5687 ///   in flight.
5688 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5689 /// we may not yet have sent the previous commitment update messages and will need to
5690 /// regenerate them.
5692 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5693 /// on this [`Channel`] if `force_holding_cell` is false.
5695 /// `Err`s will only be [`ChannelError::Ignore`].
5696 fn send_htlc<F: Deref, L: Deref>(
5697 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5698 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5699 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5700 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5701 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5702 where F::Target: FeeEstimator, L::Target: Logger
5704 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5705 self.context.channel_state.is_local_shutdown_sent() ||
5706 self.context.channel_state.is_remote_shutdown_sent()
5708 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5710 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5711 if amount_msat > channel_total_msat {
5712 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5715 if amount_msat == 0 {
5716 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5719 let available_balances = self.context.get_available_balances(fee_estimator);
5720 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5721 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5722 available_balances.next_outbound_htlc_minimum_msat)));
5725 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5726 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5727 available_balances.next_outbound_htlc_limit_msat)));
5730 if self.context.channel_state.is_peer_disconnected() {
5731 // Note that this should never really happen: being !is_live() on receipt of an
5732 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5733 // the user to send directly into a !is_live() channel. However, if we
5734 // disconnected during the time the previous hop was doing the commitment dance we may
5735 // end up getting here after the forwarding delay. In any case, returning an
5736 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5737 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5740 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5741 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5742 payment_hash, amount_msat,
5743 if force_holding_cell { "into holding cell" }
5744 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5745 else { "to peer" });
5747 if need_holding_cell {
5748 force_holding_cell = true;
5751 // Now update local state:
5752 if force_holding_cell {
5753 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5758 onion_routing_packet,
5765 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5766 htlc_id: self.context.next_holder_htlc_id,
5768 payment_hash: payment_hash.clone(),
5770 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5776 let res = msgs::UpdateAddHTLC {
5777 channel_id: self.context.channel_id,
5778 htlc_id: self.context.next_holder_htlc_id,
5782 onion_routing_packet,
5786 self.context.next_holder_htlc_id += 1;
5791 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5792 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5793 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5794 // fail to generate this, we still are at least at a position where upgrading their status
5795 // is acceptable.
5796 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5797 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5798 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5800 if let Some(state) = new_state {
5801 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5805 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5806 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5807 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5808 // Grab the preimage, if it exists, instead of cloning
5809 let mut reason = OutboundHTLCOutcome::Success(None);
5810 mem::swap(outcome, &mut reason);
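// (After the swap, `reason` owns the real outcome, including any preimage, while the
// HTLC briefly holds the `Success(None)` placeholder overwritten on the next line.)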
5811 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5814 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5815 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5816 debug_assert!(!self.context.is_outbound());
5817 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5818 self.context.feerate_per_kw = feerate;
5819 self.context.pending_update_fee = None;
5822 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5824 let (mut htlcs_ref, counterparty_commitment_tx) =
5825 self.build_commitment_no_state_update(logger);
5826 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5827 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5828 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5830 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5831 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5834 self.context.latest_monitor_update_id += 1;
5835 let monitor_update = ChannelMonitorUpdate {
5836 update_id: self.context.latest_monitor_update_id,
5837 counterparty_node_id: Some(self.context.counterparty_node_id),
5838 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5839 commitment_txid: counterparty_commitment_txid,
5840 htlc_outputs: htlcs.clone(),
5841 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5842 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5843 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5844 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5845 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5848 self.context.channel_state.set_awaiting_remote_revoke();
5852 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5853 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5854 where L::Target: Logger
5856 let counterparty_keys = self.context.build_remote_transaction_keys();
5857 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5858 let counterparty_commitment_tx = commitment_stats.tx;
5860 #[cfg(any(test, fuzzing))]
5862 if !self.context.is_outbound() {
5863 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5864 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5865 if let Some(info) = projected_commit_tx_info {
5866 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5867 if info.total_pending_htlcs == total_pending_htlcs
5868 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5869 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5870 && info.feerate == self.context.feerate_per_kw {
5871 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5872 assert_eq!(actual_fee, info.fee);
5878 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5881 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5882 /// generation when we shouldn't change HTLC/channel state.
5883 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5884 // Get the fee tests from `build_commitment_no_state_update`
5885 #[cfg(any(test, fuzzing))]
5886 self.build_commitment_no_state_update(logger);
5888 let counterparty_keys = self.context.build_remote_transaction_keys();
5889 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5890 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5892 match &self.context.holder_signer {
5893 ChannelSignerType::Ecdsa(ecdsa) => {
5894 let (signature, htlc_signatures);
5897 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5898 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5902 let res = ecdsa.sign_counterparty_commitment(
5903 &commitment_stats.tx,
5904 commitment_stats.inbound_htlc_preimages,
5905 commitment_stats.outbound_htlc_preimages,
5906 &self.context.secp_ctx,
5907 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5909 htlc_signatures = res.1;
5911 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5912 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5913 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5914 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5916 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5917 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5918 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5919 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5920 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5921 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5925 Ok((msgs::CommitmentSigned {
5926 channel_id: self.context.channel_id,
5930 partial_signature_with_nonce: None,
5931 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5933 // TODO (taproot|arik)
5939 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5940 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5942 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5943 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5944 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5945 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5946 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5947 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5948 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5949 where F::Target: FeeEstimator, L::Target: Logger
5951 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5952 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
5953 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5956 let monitor_update = self.build_commitment_no_status_check(logger);
5957 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5958 Ok(self.push_ret_blockable_mon_update(monitor_update))
5964 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5965 /// occurred.
5966 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5967 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5968 fee_base_msat: msg.contents.fee_base_msat,
5969 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5970 cltv_expiry_delta: msg.contents.cltv_expiry_delta
5972 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5974 self.context.counterparty_forwarding_info = new_forwarding_info;
5980 /// Begins the shutdown process, getting a message for the remote peer and returning all
5981 /// holding cell HTLCs for payment failure.
	pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
		target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
	-> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
	{
		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
				return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
			}
		}
		if self.context.channel_state.is_local_shutdown_sent() {
			return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
		}
		else if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
		}
		if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
			return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
			return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
		}

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				// use the override shutdown script if one was provided
				let shutdown_scriptpubkey = match override_shutdown_script {
					Some(script) => script,
					None => {
						// otherwise, use the shutdown scriptpubkey provided by the signer
						match signer_provider.get_shutdown_scriptpubkey() {
							Ok(scriptpubkey) => scriptpubkey,
							Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
						}
					},
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!
		self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = msgs::Shutdown {
			channel_id: self.context.channel_id,
			scriptpubkey: self.get_closing_scriptpubkey(),
		};

		// Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
		// our shutdown until we've committed all of the pending changes.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
			"we can't both complete shutdown and return a monitor update");

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
	pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
		self.context.holding_cell_htlc_updates.iter()
			.flat_map(|htlc_update| {
				match htlc_update {
					HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
						=> Some((source, payment_hash)),
					_ => None,
				}
			})
			.chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
	}
}

/// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}

impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
	pub fn new<ES: Deref, F: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
		channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
		outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
	) -> Result<OutboundV1Channel<SP>, APIError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator
	{
		let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
		let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();

		if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
			return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
		}
		if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
		}
		let channel_value_msat = channel_value_satoshis * 1000;
		if push_msat > channel_value_msat {
			return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
		}
		if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
			return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
		}
		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
		}

		let channel_type = Self::get_initial_channel_type(&config, their_features);
		debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));

		let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			(ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
		} else {
			(ConfirmationTarget::NonAnchorChannelFee, 0)
		};
		let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);

		let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
		if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
			return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
		}

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
		};

		let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));

		Ok(Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel: config.channel_handshake_config.announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),

				channel_id: temporary_channel_id,
				temporary_channel_id: Some(temporary_channel_id),
				channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,
				channel_value_satoshis,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: commitment_feerate,
				counterparty_dust_limit_satoshis: 0,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: 0,
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: 0,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: 0,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth: None, // Filled in in accept_channel

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: true,
					counterparty_parameters: None,
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: None,
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey: None,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		})
	}
	/// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
	fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let signature = match &self.context.holder_signer {
			// TODO (taproot|arik): move match into calling method for Taproot
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
					.map(|(sig, _)| sig).ok()?
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		if self.context.signer_pending_funding {
			log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
			self.context.signer_pending_funding = false;
		}

		Some(msgs::FundingCreated {
			temporary_channel_id: self.context.temporary_channel_id.unwrap(),
			funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
			funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
			signature,
			#[cfg(taproot)]
			partial_signature_with_nonce: None,
			#[cfg(taproot)]
			next_local_nonce: None,
		})
	}
	/// Updates channel state with knowledge of the funding transaction's txid/index, and generates
	/// a funding_created message for the remote peer.
	/// Panics if called at some time other than immediately after initial handshake, if called twice,
	/// or if called on an inbound channel.
	/// Note that channel_id changes during this call!
	/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
	/// If an Err is returned, it is a ChannelError::Close.
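	///
	/// Sketch of the expected call order (illustrative, with hypothetical `chan`, `funding_tx`
	/// and `log` bindings; the real driver is `ChannelManager`):
	///
	/// ```ignore
	/// let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
	/// // After this call the channel_id is derived from the funding outpoint.
	/// let funding_created_opt = chan.get_funding_created(funding_tx, funding_txo, false, &log)
	/// 	.map_err(|(_chan, e)| e)?;
	/// // Send the message (if the signer was ready) to the peer, then wait for their
	/// // funding_signed before broadcasting the funding transaction.
	/// ```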
	pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
	-> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
		if !self.context.is_outbound() {
			panic!("Tried to create outbound funding_created message on an inbound channel!");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
		self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);

		// Now that we're past error-generating stuff, update our local state:

		self.context.channel_state = ChannelState::FundingNegotiated;
		self.context.channel_id = funding_txo.to_channel_id();

		// If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
		// We can skip this if it is a zero-conf channel.
		if funding_transaction.is_coin_base() &&
			self.context.minimum_depth.unwrap_or(0) > 0 &&
			self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
			self.context.minimum_depth = Some(COINBASE_MATURITY);
		}

		self.context.funding_transaction = Some(funding_transaction);
		self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);

		let funding_created = self.get_funding_created_msg(logger);
		if funding_created.is_none() {
			if !self.context.signer_pending_funding {
				log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
				self.context.signer_pending_funding = true;
			}
		}

		Ok(funding_created)
	}
	fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
		// The default channel type (i.e. the first one we try) depends on whether the channel is
		// public - if it is, we just go with `only_static_remotekey` as it's the only option
		// available. If it's private, we first try `scid_privacy` as it provides better privacy
		// with no other changes, and fall back to `only_static_remotekey`.
		let mut ret = ChannelTypeFeatures::only_static_remote_key();
		if !config.channel_handshake_config.announced_channel &&
			config.channel_handshake_config.negotiate_scid_privacy &&
			their_features.supports_scid_privacy() {
			ret.set_scid_privacy_required();
		}

		// Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
		// set it now. If they don't understand it, we'll fall back to our default of
		// `only_static_remotekey`.
		if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
			their_features.supports_anchors_zero_fee_htlc_tx() {
			ret.set_anchors_zero_fee_htlc_tx_required();
		}

		ret
	}
	/// If we receive an error message, it may only be a rejection of the channel type we tried,
	/// not of our ability to open any channel at all. Thus, on error, we should first call this
	/// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
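	///
	/// Illustrative retry loop (hedged sketch; `chan`, `chain_hash`, `fee_est` and the message
	/// plumbing are assumed to exist on the caller's side):
	///
	/// ```ignore
	/// // Peer rejected our open_channel, possibly only disliking the channel type.
	/// match chan.maybe_handle_error_without_close(chain_hash, &fee_est) {
	/// 	// A downgraded channel type is available: re-send the new open_channel.
	/// 	Ok(new_open_channel_msg) => { /* queue new_open_channel_msg to the peer */ },
	/// 	// Nothing left to try (already at `only_static_remote_key`): fail the channel.
	/// 	Err(()) => { /* close and surface the error to the user */ },
	/// }
	/// ```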
	pub(crate) fn maybe_handle_error_without_close<F: Deref>(
		&mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
	) -> Result<msgs::OpenChannel, ()>
	where
		F::Target: FeeEstimator
	{
		if !self.context.is_outbound() ||
			!matches!(
				self.context.channel_state, ChannelState::NegotiatingFunding(flags)
				if flags == NegotiatingFundingFlags::OUR_INIT_SENT
			)
		{
			return Err(());
		}
		if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
			// We've exhausted our options
			return Err(());
		}
		// We support opening a few different types of channels. Try removing our additional
		// features one by one until we've either arrived at our default or the counterparty has
		// accepted one.
		//
		// Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
		// counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
		// checks whether the counterparty supports every feature, this would only happen if the
		// counterparty is advertising the feature, but rejecting channels proposing the feature for
		// some reason.
		if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
			self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
			assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
		} else if self.context.channel_type.supports_scid_privacy() {
			self.context.channel_type.clear_scid_privacy();
		} else {
			self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
		}
		self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
		Ok(self.get_open_channel(chain_hash))
	}
	pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
		if !self.context.is_outbound() {
			panic!("Tried to open a channel for an inbound channel?");
		}
		if self.context.have_received_message() {
			panic!("Cannot generate an open_channel after we've moved forward");
		}

		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an open_channel for a channel that has already advanced");
		}

		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::OpenChannel {
			chain_hash,
			temporary_channel_id: self.context.channel_id,
			funding_satoshis: self.context.channel_value_satoshis,
			push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			feerate_per_kw: self.context.feerate_per_kw as u32,
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			channel_flags: if self.context.config.announced_channel {1} else {0},
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
		}
	}
	pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
		let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };

		// Check sanity of message fields:
		if !self.context.is_outbound() {
			return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
		}
		if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
			return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
		}
		if msg.dust_limit_satoshis > 21000000 * 100000000 {
			return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
		}
		if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
		}
		if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
				msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
		}
		let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_delay_acceptable {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.minimum_depth > peer_limits.max_minimum_depth {
			return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
		}

		if let Some(ty) = &msg.channel_type {
			if *ty != self.context.channel_type {
				return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
			}
		} else if their_features.supports_channel_type() {
			// Assume they've accepted the channel type as they said they understand it.
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			self.context.channel_type = channel_type.clone();
			self.context.channel_transaction_parameters.channel_type_features = channel_type;
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
		self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
		self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
		self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
		self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;

		if peer_limits.trust_own_funding_0conf {
			self.context.minimum_depth = Some(msg.minimum_depth);
		} else {
			self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
		}

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
			selected_contest_delay: msg.to_self_delay,
			pubkeys: counterparty_pubkeys,
		});

		self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
		self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;

		self.context.channel_state = ChannelState::NegotiatingFunding(
			NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
		);
		self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.

		Ok(())
	}
	/// Handles a funding_signed message from the remote end.
	/// If this call is successful, broadcast the funding transaction (and not before!)
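	///
	/// Hedged usage sketch (the real call site lives in `ChannelManager`; `pending_chan`,
	/// `best_block`, `signer_provider` and `log` are assumed bindings):
	///
	/// ```ignore
	/// match pending_chan.funding_signed(&msg, best_block, &signer_provider, &log) {
	/// 	Ok((chan, channel_monitor)) => {
	/// 		// Persist `channel_monitor` with chain::Watch; only then is it safe to
	/// 		// broadcast the funding transaction.
	/// 	},
	/// 	Err((unfunded_chan, e)) => { /* the channel is handed back for closure */ },
	/// }
	/// ```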
	pub fn funding_signed<L: Deref>(
		mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
	) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
	where
		L::Target: Logger
	{
		if !self.context.is_outbound() {
			return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
		}
		if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
			return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
		}
		if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
				self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
				self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
		}

		let funding_script = self.context.get_funding_redeemscript();

		let counterparty_keys = self.context.build_remote_transaction_keys();
		let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
		let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
		let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();

		log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
			&self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));

		let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
		{
			let trusted_tx = initial_commitment_tx.trust();
			let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
			let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
			// They sign our commitment transaction, allowing us to broadcast the tx if we wish.
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
				return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
			}
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			initial_commitment_tx,
			msg.signature,
			Vec::new(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		let validated =
			self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
		if validated.is_err() {
			return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let funding_txo = self.context.get_funding_txo().unwrap();
		let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
		let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
		let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
		let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
		monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
		let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
			shutdown_script, self.context.get_holder_selected_contest_delay(),
			&self.context.destination_script, (funding_txo, funding_txo_script),
			&self.context.channel_transaction_parameters,
			funding_redeemscript.clone(), self.context.channel_value_satoshis,
			obscure_factor,
			holder_commitment_tx, best_block, self.context.counterparty_node_id);
		channel_monitor.provide_initial_counterparty_commitment_tx(
			counterparty_initial_bitcoin_tx.txid, Vec::new(),
			self.context.cur_counterparty_commitment_transaction_number,
			self.context.counterparty_cur_commitment_point.unwrap(),
			counterparty_initial_commitment_tx.feerate_per_kw(),
			counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
			counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);

		assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet with which to fail an update!
		if self.context.is_batch_funding() {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
		} else {
			self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
		}
		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());

		let mut channel = Channel { context: self.context };

		let need_channel_ready = channel.check_get_channel_ready(0).is_some();
		channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
		Ok((channel, channel_monitor))
	}
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
		if self.context.signer_pending_funding && self.context.is_outbound() {
			log_trace!(logger, "Signer unblocked a funding_created");
			self.get_funding_created_msg(logger)
		} else { None }
	}
}

/// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
	pub context: ChannelContext<SP>,
	pub unfunded_context: UnfundedChannelContext,
}

impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
	/// Creates a new channel from a remote side's request for one.
	/// Assumes chain_hash has already been checked and corresponds with what we expect!
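	///
	/// Sketch of the expected call site (hedged; in practice `ChannelManager` drives this when
	/// handling an `open_channel` message; all bindings here are assumed):
	///
	/// ```ignore
	/// let chan = InboundV1Channel::new(
	/// 	&fee_est, &entropy_source, &signer_provider, counterparty_node_id,
	/// 	&our_supported_features, &their_init_features, &open_channel_msg,
	/// 	user_channel_id, &user_config, best_block_height, &log,
	/// 	/* is_0conf */ false,
	/// )?;
	/// // The channel is not yet accepted; call `accept_inbound_channel()` to get the
	/// // accept_channel message for the peer.
	/// ```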
	pub fn new<ES: Deref, F: Deref, L: Deref>(
		fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
		counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
		their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
		current_chain_height: u32, logger: &L, is_0conf: bool,
	) -> Result<InboundV1Channel<SP>, ChannelError>
	where ES::Target: EntropySource,
	      F::Target: FeeEstimator,
	      L::Target: Logger,
	{
		let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
		let announced_channel = (msg.channel_flags & 1) == 1;

		// First check the channel type is known, failing before we do anything else if we don't
		// support this channel type.
		let channel_type = if let Some(channel_type) = &msg.channel_type {
			if channel_type.supports_any_optional_bits() {
				return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
			}

			// We only support the channel types defined by the `ChannelManager` in
			// `provided_channel_type_features`. The channel type must always support
			// `static_remote_key`.
			if !channel_type.requires_static_remote_key() {
				return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
			}
			// Make sure we support all of the features behind the channel type.
			if !channel_type.is_subset(our_supported_features) {
				return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
			}
			if channel_type.requires_scid_privacy() && announced_channel {
				return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
			}
			channel_type.clone()
		} else {
			let channel_type = ChannelTypeFeatures::from_init(&their_features);
			if channel_type != ChannelTypeFeatures::only_static_remote_key() {
				return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
			}
			channel_type
		};

		let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
		let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
		let pubkeys = holder_signer.pubkeys().clone();
		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: msg.funding_pubkey,
			revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
			payment_point: msg.payment_point,
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
			htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
		};

		if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
			return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
		}

		// Check sanity of message fields:
		if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
			return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
		}
		if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
			return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
		}
		if msg.channel_reserve_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
		}
		let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
		if msg.push_msat > full_channel_value_msat {
			return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
		}
		if msg.dust_limit_satoshis > msg.funding_satoshis {
			return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
		}
		if msg.htlc_minimum_msat >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
		}
		Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;

		let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
		if msg.to_self_delay > max_counterparty_selected_contest_delay {
			return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
		}
		if msg.max_accepted_htlcs < 1 {
			return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
		}
		if msg.max_accepted_htlcs > MAX_HTLCS {
			return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
		}

		// Now check against optional parameters as set by config...
		if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
			return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
		}
		if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
		}
		if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
		}
		if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
			return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
		}
		if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
			return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
		}
		if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
			return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
		}

		// Convert things into internal flags and prep our state:

		if config.channel_handshake_limits.force_announced_channel_preference {
			if config.channel_handshake_config.announced_channel != announced_channel {
				return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
			}
		}

		let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
		if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			// Protocol-level safety check in place, although it should never happen because
			// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
		}
		if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
			return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
		}
		if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
			log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
				msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
		}
		if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
			return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
		}

		// Check if the funder's amount for the initial commitment tx is sufficient
		// for full fee payment plus a few HTLCs to ensure the channel will be useful.
		let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2
		} else {
			0
		};
		let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
		let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
		if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
			return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
		}

		let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
		// While it's reasonable for us to not meet the channel reserve initially (if they don't
		// want to push much to us), our counterparty should always have more than our reserve.
		if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
			return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
		}

		let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
			match &msg.shutdown_scriptpubkey {
				&Some(ref script) => {
					// Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
					if script.len() == 0 {
						None
					} else {
						if !script::is_bolt2_compliant(&script, their_features) {
							return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
						}
						Some(script.clone())
					}
				},
				// Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel
				&None => {
					return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
				}
			}
		} else { None };

		let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
			match signer_provider.get_shutdown_scriptpubkey() {
				Ok(scriptpubkey) => Some(scriptpubkey),
				Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
			}
		} else { None };

		if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
			if !shutdown_scriptpubkey.is_compatible(&their_features) {
				return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
			}
		}

		let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
			Ok(script) => script,
			Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
		};

		let mut secp_ctx = Secp256k1::new();
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());

		let minimum_depth = if is_0conf {
			Some(0)
		} else {
			Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
		};

		let chan = Self {
			context: ChannelContext {
				user_id,

				config: LegacyChannelConfig {
					options: config.channel_config.clone(),
					announced_channel,
					commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
				},

				prev_config: None,

				inbound_handshake_limits_override: None,

				temporary_channel_id: Some(msg.temporary_channel_id),
				channel_id: msg.temporary_channel_id,
				channel_state: ChannelState::NegotiatingFunding(
					NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
				),
				announcement_sigs_state: AnnouncementSigsState::NotSent,
				secp_ctx,

				latest_monitor_update_id: 0,

				holder_signer: ChannelSignerType::Ecdsa(holder_signer),
				shutdown_scriptpubkey,
				destination_script,

				cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
				value_to_self_msat: msg.push_msat,

				pending_inbound_htlcs: Vec::new(),
				pending_outbound_htlcs: Vec::new(),
				holding_cell_htlc_updates: Vec::new(),
				pending_update_fee: None,
				holding_cell_update_fee: None,
				next_holder_htlc_id: 0,
				next_counterparty_htlc_id: 0,
				update_time_counter: 1,

				resend_order: RAACommitmentOrder::CommitmentFirst,

				monitor_pending_channel_ready: false,
				monitor_pending_revoke_and_ack: false,
				monitor_pending_commitment_signed: false,
				monitor_pending_forwards: Vec::new(),
				monitor_pending_failures: Vec::new(),
				monitor_pending_finalized_fulfills: Vec::new(),

				signer_pending_commitment_update: false,
				signer_pending_funding: false,

				#[cfg(debug_assertions)]
				holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
				#[cfg(debug_assertions)]
				counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),

				last_sent_closing_fee: None,
				pending_counterparty_closing_signed: None,
				expecting_peer_commitment_signed: false,
				closing_fee_limits: None,
				target_closing_feerate_sats_per_kw: None,

				funding_tx_confirmed_in: None,
				funding_tx_confirmation_height: 0,
				short_channel_id: None,
				channel_creation_height: current_chain_height,

				feerate_per_kw: msg.feerate_per_kw,
				channel_value_satoshis: msg.funding_satoshis,
				counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
				holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
				counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
				holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
				counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
				holder_selected_channel_reserve_satoshis,
				counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
				holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
				counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
				holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
				minimum_depth,

				counterparty_forwarding_info: None,

				channel_transaction_parameters: ChannelTransactionParameters {
					holder_pubkeys: pubkeys,
					holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
					is_outbound_from_holder: false,
					counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
						selected_contest_delay: msg.to_self_delay,
						pubkeys: counterparty_pubkeys,
					}),
					funding_outpoint: None,
					channel_type_features: channel_type.clone()
				},
				funding_transaction: None,
				is_batch_funding: None,

				counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
				counterparty_prev_commitment_point: None,
				counterparty_node_id,

				counterparty_shutdown_scriptpubkey,

				commitment_secrets: CounterpartyCommitmentSecrets::new(),

				channel_update_status: ChannelUpdateStatus::Enabled,
				closing_signed_in_flight: false,

				announcement_sigs: None,

				#[cfg(any(test, fuzzing))]
				next_local_commitment_tx_fee_info_cached: Mutex::new(None),
				#[cfg(any(test, fuzzing))]
				next_remote_commitment_tx_fee_info_cached: Mutex::new(None),

				workaround_lnd_bug_4006: None,
				sent_message_awaiting_response: None,

				latest_inbound_scid_alias: None,
				outbound_scid_alias: 0,

				channel_pending_event_emitted: false,
				channel_ready_event_emitted: false,

				#[cfg(any(test, fuzzing))]
				historical_inbound_htlc_fulfills: HashSet::new(),

				channel_type,
				channel_keys_id,

				blocked_monitor_updates: Vec::new(),
			},
			unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
		};

		Ok(chan)
	}
	/// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
	/// should be sent back to the counterparty node.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
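	///
	/// Hedged caller-side sketch (`chan` is an `InboundV1Channel` built via `new` above; the
	/// surrounding plumbing is assumed):
	///
	/// ```ignore
	/// let accept_channel_msg = chan.accept_inbound_channel();
	/// // Queue `accept_channel_msg` for the counterparty and await their funding_created.
	/// ```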
	pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
		if self.context.is_outbound() {
			panic!("Tried to send accept_channel for an outbound channel?");
		}
		if !matches!(
			self.context.channel_state, ChannelState::NegotiatingFunding(flags)
			if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
		) {
			panic!("Tried to send accept_channel after channel had moved forward");
		}
		if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
			panic!("Tried to send an accept_channel for a channel that has already advanced");
		}

		self.generate_accept_channel_message()
	}
	/// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
	/// inbound channel. If the intention is to accept an inbound channel, use
	/// [`InboundV1Channel::accept_inbound_channel`] instead.
	///
	/// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
	fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
		let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let keys = self.context.get_holder_pubkeys();

		msgs::AcceptChannel {
			temporary_channel_id: self.context.channel_id,
			dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
			max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
			channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
			htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
			minimum_depth: self.context.minimum_depth.unwrap(),
			to_self_delay: self.context.get_holder_selected_contest_delay(),
			max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
			funding_pubkey: keys.funding_pubkey,
			revocation_basepoint: keys.revocation_basepoint.to_public_key(),
			payment_point: keys.payment_point,
			delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
			htlc_basepoint: keys.htlc_basepoint.to_public_key(),
			first_per_commitment_point,
			shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
				Some(script) => script.clone().into_inner(),
				None => Builder::new().into_script(),
			}),
			channel_type: Some(self.context.channel_type.clone()),
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
7137 /// Allows tests to extract a [`msgs::AcceptChannel`] message for an
7138 /// inbound channel without accepting it.
7140 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7142 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7143 self.generate_accept_channel_message()
7146 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7147 let funding_script = self.context.get_funding_redeemscript();
7149 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7150 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7151 let trusted_tx = initial_commitment_tx.trust();
7152 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7153 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7154 // They sign the holder commitment transaction...
7155 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7156 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7157 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7158 encode::serialize_hex(&funding_script), &self.context.channel_id());
7159 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7161 Ok(initial_commitment_tx)
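// A hedged, standalone sketch (illustrative only, not channel state) of the
// signature check above: rust-secp256k1 verifies a compact ECDSA signature
// against a 32-byte sighash message and the counterparty's funding pubkey.
// The key and digest bytes below are placeholder values.
#[cfg(test)]
fn _funding_sig_verification_sketch() {
	use bitcoin::secp256k1::{Message, Secp256k1, SecretKey};
	let secp_ctx = Secp256k1::new();
	let counterparty_sk = SecretKey::from_slice(&[0x42; 32]).unwrap();
	let counterparty_pk = PublicKey::from_secret_key(&secp_ctx, &counterparty_sk);
	let sighash = Message::from_slice(&[0x01; 32]).unwrap();
	let sig = secp_ctx.sign_ecdsa(&sighash, &counterparty_sk);
	// A valid signature over the sighash by the counterparty's key verifies;
	// `check_funding_created_signature` errors out in the failing case instead.
	assert!(secp_ctx.verify_ecdsa(&sighash, &sig, &counterparty_pk).is_ok());
}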
7164 pub fn funding_created<L: Deref>(
7165 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7166 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7170 if self.context.is_outbound() {
7171 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7174 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7175 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7177 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7178 // remember the channel, so it's safe to just send an error_message here and drop the
7180 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7182 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7183 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7184 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7185 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7188 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7189 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7190 // This is an externally observable change before we finish all our checks. In particular
7191 // check_funding_created_signature may fail.
7192 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7194 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7196 Err(ChannelError::Close(e)) => {
7197 self.context.channel_transaction_parameters.funding_outpoint = None;
7198 return Err((self, ChannelError::Close(e)));
7201 // The only error we know how to handle is ChannelError::Close, so we fall over here
7202 // to make sure we don't continue with an inconsistent state.
7203 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7207 let holder_commitment_tx = HolderCommitmentTransaction::new(
7208 initial_commitment_tx,
7211 &self.context.get_holder_pubkeys().funding_pubkey,
7212 self.context.counterparty_funding_pubkey()
7215 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7216 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7219 // Now that we're past error-generating stuff, update our local state:
7221 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7222 self.context.channel_id = funding_txo.to_channel_id();
7223 self.context.cur_counterparty_commitment_transaction_number -= 1;
7224 self.context.cur_holder_commitment_transaction_number -= 1;
7226 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7228 let funding_redeemscript = self.context.get_funding_redeemscript();
7229 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7230 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7231 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7232 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7233 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7234 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7235 shutdown_script, self.context.get_holder_selected_contest_delay(),
7236 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7237 &self.context.channel_transaction_parameters,
7238 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7240 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7241 channel_monitor.provide_initial_counterparty_commitment_tx(
7242 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7243 self.context.cur_counterparty_commitment_transaction_number + 1,
7244 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7245 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7246 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7248 log_info!(logger, "{} funding_signed for peer for channel {}",
7249 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7251 // Promote the channel to a full-fledged one now that we have updated the state and have a
7252 // `ChannelMonitor`.
7253 let mut channel = Channel {
7254 context: self.context,
7256 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7257 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7259 Ok((channel, funding_signed, channel_monitor))
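// A hedged sketch of how the obscure factor computed in `funding_created` is
// used per BOLT 3: the low 48 bits of the commitment number are XORed with
// the factor before being split across the nLockTime and nSequence fields.
// The factor below is an arbitrary placeholder, not a derived value.
#[cfg(test)]
fn _commitment_number_obscuring_sketch() {
	let obscure_factor: u64 = 0x2bb0_3852_1914 & 0xffff_ffff_ffff;
	let commitment_number: u64 = 42;
	let obscured = commitment_number ^ obscure_factor;
	// XOR is an involution, so a party knowing both payment points can
	// recover the commitment number from a broadcast commitment tx.
	assert_eq!(obscured ^ obscure_factor, commitment_number);
}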
7263 const SERIALIZATION_VERSION: u8 = 3;
7264 const MIN_SERIALIZATION_VERSION: u8 = 3;
7266 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7272 impl Writeable for ChannelUpdateStatus {
7273 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7274 // We only care about writing out the current state as it was announced, ie only either
7275 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7276 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7278 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7279 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7280 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7281 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7287 impl Readable for ChannelUpdateStatus {
7288 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7289 Ok(match <u8 as Readable>::read(reader)? {
7290 0 => ChannelUpdateStatus::Enabled,
7291 1 => ChannelUpdateStatus::Disabled,
7292 _ => return Err(DecodeError::InvalidValue),
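// A hedged round-trip sketch of the staged-state collapsing described above:
// only the last-announced state survives serialization, so `Enabled` encodes
// to a single 0 byte and reads back as `Enabled`.
#[cfg(test)]
fn _channel_update_status_roundtrip_sketch() {
	let bytes = ChannelUpdateStatus::Enabled.encode();
	assert_eq!(bytes, vec![0u8]);
	let read: ChannelUpdateStatus = Readable::read(&mut &bytes[..]).unwrap();
	assert!(matches!(read, ChannelUpdateStatus::Enabled));
}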
7297 impl Writeable for AnnouncementSigsState {
7298 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7299 // We only care about writing out the current state as if we had just disconnected, at
7300 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7302 AnnouncementSigsState::NotSent => 0u8.write(writer),
7303 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7304 AnnouncementSigsState::Committed => 0u8.write(writer),
7305 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7310 impl Readable for AnnouncementSigsState {
7311 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7312 Ok(match <u8 as Readable>::read(reader)? {
7313 0 => AnnouncementSigsState::NotSent,
7314 1 => AnnouncementSigsState::PeerReceived,
7315 _ => return Err(DecodeError::InvalidValue),
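// A hedged sketch of the intentionally lossy collapse above: `MessageSent`
// serializes as `NotSent`, so a round-trip forgets any progress short of the
// counterparty having received our announcement signatures.
#[cfg(test)]
fn _announcement_sigs_state_collapse_sketch() {
	let bytes = AnnouncementSigsState::MessageSent.encode();
	let read: AnnouncementSigsState = Readable::read(&mut &bytes[..]).unwrap();
	assert!(matches!(read, AnnouncementSigsState::NotSent));
}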
7320 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7321 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7322 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7325 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7327 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7328 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7329 // the low bytes now and the optional high bytes later.
7330 let user_id_low = self.context.user_id as u64;
7331 user_id_low.write(writer)?;
7333 // Version 1 deserializers expected to read parts of the config object here. Version 2
7334 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7335 // `minimum_depth` we simply write dummy values here.
7336 writer.write_all(&[0; 8])?;
7338 self.context.channel_id.write(writer)?;
7340 let mut channel_state = self.context.channel_state;
7341 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7342 channel_state.set_peer_disconnected();
7344 channel_state.to_u32().write(writer)?;
7346 self.context.channel_value_satoshis.write(writer)?;
7348 self.context.latest_monitor_update_id.write(writer)?;
7350 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7351 // deserialized from that format.
7352 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7353 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7354 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
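		// Hedged aside: the all-zero sentinel written above is unambiguous
		// because 33 zero bytes can never be a valid compressed secp256k1
		// public key (valid keys start with 0x02 or 0x03).
		#[cfg(debug_assertions)]
		{
			debug_assert!(PublicKey::from_slice(&[0u8; PUBLIC_KEY_SIZE]).is_err());
		}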
7356 self.context.destination_script.write(writer)?;
7358 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7359 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7360 self.context.value_to_self_msat.write(writer)?;
7362 let mut dropped_inbound_htlcs = 0;
7363 for htlc in self.context.pending_inbound_htlcs.iter() {
7364 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7365 dropped_inbound_htlcs += 1;
7368 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7369 for htlc in self.context.pending_inbound_htlcs.iter() {
7370 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7373 htlc.htlc_id.write(writer)?;
7374 htlc.amount_msat.write(writer)?;
7375 htlc.cltv_expiry.write(writer)?;
7376 htlc.payment_hash.write(writer)?;
7378 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7379 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7381 htlc_state.write(writer)?;
7383 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7385 htlc_state.write(writer)?;
7387 &InboundHTLCState::Committed => {
7390 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7392 removal_reason.write(writer)?;
7397 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7398 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7399 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7401 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7402 for htlc in self.context.pending_outbound_htlcs.iter() {
7403 htlc.htlc_id.write(writer)?;
7404 htlc.amount_msat.write(writer)?;
7405 htlc.cltv_expiry.write(writer)?;
7406 htlc.payment_hash.write(writer)?;
7407 htlc.source.write(writer)?;
7409 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7411 onion_packet.write(writer)?;
7413 &OutboundHTLCState::Committed => {
7416 &OutboundHTLCState::RemoteRemoved(_) => {
7417 // Treat this as a Committed because we haven't received the CS - they'll
7418 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7421 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7423 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7424 preimages.push(preimage);
7426 let reason: Option<&HTLCFailReason> = outcome.into();
7427 reason.write(writer)?;
7429 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7431 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7432 preimages.push(preimage);
7434 let reason: Option<&HTLCFailReason> = outcome.into();
7435 reason.write(writer)?;
7438 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7439 pending_outbound_blinding_points.push(htlc.blinding_point);
7442 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7443 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7444 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7445 for update in self.context.holding_cell_htlc_updates.iter() {
7447 &HTLCUpdateAwaitingACK::AddHTLC {
7448 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7449 blinding_point, skimmed_fee_msat,
7452 amount_msat.write(writer)?;
7453 cltv_expiry.write(writer)?;
7454 payment_hash.write(writer)?;
7455 source.write(writer)?;
7456 onion_routing_packet.write(writer)?;
7458 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7459 holding_cell_blinding_points.push(blinding_point);
7461 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7463 payment_preimage.write(writer)?;
7464 htlc_id.write(writer)?;
7466 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7468 htlc_id.write(writer)?;
7469 err_packet.write(writer)?;
7474 match self.context.resend_order {
7475 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7476 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7479 self.context.monitor_pending_channel_ready.write(writer)?;
7480 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7481 self.context.monitor_pending_commitment_signed.write(writer)?;
7483 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7484 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7485 pending_forward.write(writer)?;
7486 htlc_id.write(writer)?;
7489 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7490 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7491 htlc_source.write(writer)?;
7492 payment_hash.write(writer)?;
7493 fail_reason.write(writer)?;
7496 if self.context.is_outbound() {
7497 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7498 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7499 Some(feerate).write(writer)?;
7501 // As for inbound HTLCs, if the update was only announced and never committed in a
7502 // commitment_signed, drop it.
7503 None::<u32>.write(writer)?;
7505 self.context.holding_cell_update_fee.write(writer)?;
7507 self.context.next_holder_htlc_id.write(writer)?;
7508 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7509 self.context.update_time_counter.write(writer)?;
7510 self.context.feerate_per_kw.write(writer)?;
7512 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7513 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7514 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7515 // consider the stale state on reload.
7518 self.context.funding_tx_confirmed_in.write(writer)?;
7519 self.context.funding_tx_confirmation_height.write(writer)?;
7520 self.context.short_channel_id.write(writer)?;
7522 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7523 self.context.holder_dust_limit_satoshis.write(writer)?;
7524 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7526 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7527 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7529 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7530 self.context.holder_htlc_minimum_msat.write(writer)?;
7531 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7533 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7534 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7536 match &self.context.counterparty_forwarding_info {
7539 info.fee_base_msat.write(writer)?;
7540 info.fee_proportional_millionths.write(writer)?;
7541 info.cltv_expiry_delta.write(writer)?;
7543 None => 0u8.write(writer)?
7546 self.context.channel_transaction_parameters.write(writer)?;
7547 self.context.funding_transaction.write(writer)?;
7549 self.context.counterparty_cur_commitment_point.write(writer)?;
7550 self.context.counterparty_prev_commitment_point.write(writer)?;
7551 self.context.counterparty_node_id.write(writer)?;
7553 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7555 self.context.commitment_secrets.write(writer)?;
7557 self.context.channel_update_status.write(writer)?;
7559 #[cfg(any(test, fuzzing))]
7560 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7561 #[cfg(any(test, fuzzing))]
7562 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7563 htlc.write(writer)?;
7566 // If the channel type is something other than only-static-remote-key, then we need to have
7567 // older clients fail to deserialize this channel at all. If the type is
7568 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7570 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7571 Some(&self.context.channel_type) } else { None };
7573 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7574 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7575 // a different percentage of the channel value than 10%, which older versions of LDK used
7576 // to set it to before the percentage was made configurable.
7577 let serialized_holder_selected_reserve =
7578 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7579 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7581 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7582 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7583 let serialized_holder_htlc_max_in_flight =
7584 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7585 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
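		// Hedged sketch of the "serialize only when non-default" pattern used
		// for the two values above: a value equal to the legacy default is
		// written as None so old readers reconstruct it from their own
		// default. The closure and numbers below are hypothetical.
		#[cfg(debug_assertions)]
		{
			let legacy_default = |channel_value_satoshis: u64| channel_value_satoshis / 100;
			let (channel_value_satoshis, current) = (100_000u64, 1_000u64);
			let serialized = if current != legacy_default(channel_value_satoshis) { Some(current) } else { None };
			debug_assert_eq!(serialized, None); // equal to the default => omitted on the wire
		}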
7587 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7588 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7590 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7591 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7592 // we write the high bytes as an option here.
7593 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7595 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7597 write_tlv_fields!(writer, {
7598 (0, self.context.announcement_sigs, option),
7599 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7600 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7601 // them twice, once with their original default values above, and once as an option
7602 // here. On the read side, old versions will simply ignore the odd-type entries here,
7603 // and new versions map the default values to None and allow the TLV entries here to
7605 (1, self.context.minimum_depth, option),
7606 (2, chan_type, option),
7607 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7608 (4, serialized_holder_selected_reserve, option),
7609 (5, self.context.config, required),
7610 (6, serialized_holder_htlc_max_in_flight, option),
7611 (7, self.context.shutdown_scriptpubkey, option),
7612 (8, self.context.blocked_monitor_updates, optional_vec),
7613 (9, self.context.target_closing_feerate_sats_per_kw, option),
7614 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7615 (13, self.context.channel_creation_height, required),
7616 (15, preimages, required_vec),
7617 (17, self.context.announcement_sigs_state, required),
7618 (19, self.context.latest_inbound_scid_alias, option),
7619 (21, self.context.outbound_scid_alias, required),
7620 (23, channel_ready_event_emitted, option),
7621 (25, user_id_high_opt, option),
7622 (27, self.context.channel_keys_id, required),
7623 (28, holder_max_accepted_htlcs, option),
7624 (29, self.context.temporary_channel_id, option),
7625 (31, channel_pending_event_emitted, option),
7626 (35, pending_outbound_skimmed_fees, optional_vec),
7627 (37, holding_cell_skimmed_fees, optional_vec),
7628 (38, self.context.is_batch_funding, option),
7629 (39, pending_outbound_blinding_points, optional_vec),
7630 (41, holding_cell_blinding_points, optional_vec),
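// A hedged sketch of the TLV stream convention relied on above:
// `impl_writeable_tlv_based!` tags each field with a type number where
// unknown *odd* types may be skipped by old readers while unknown *even*
// types are a hard decode error ("it's OK to be odd"). `TlvSketch` is a
// hypothetical type for illustration, not part of the channel.
#[cfg(test)]
mod tlv_convention_sketch {
	use crate::util::ser::{Readable, Writeable};

	#[derive(Debug, PartialEq)]
	struct TlvSketch {
		required_field: u32,
		optional_field: Option<u64>,
	}
	impl_writeable_tlv_based!(TlvSketch, {
		(0, required_field, required),
		(1, optional_field, option),
	});

	#[test]
	fn roundtrip() {
		let original = TlvSketch { required_field: 42, optional_field: Some(7) };
		let decoded: TlvSketch = Readable::read(&mut &original.encode()[..]).unwrap();
		assert_eq!(decoded, original);
	}
}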
7637 const MAX_ALLOC_SIZE: usize = 64*1024;
7638 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7640 ES::Target: EntropySource,
7641 SP::Target: SignerProvider
7643 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7644 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7645 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7647 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7648 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7649 // the low bytes now and the high bytes later.
7650 let user_id_low: u64 = Readable::read(reader)?;
7652 let mut config = Some(LegacyChannelConfig::default());
7654 // Read the old serialization of the ChannelConfig from version 0.0.98.
7655 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7656 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7657 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7658 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7660 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7661 let mut _val: u64 = Readable::read(reader)?;
7664 let channel_id = Readable::read(reader)?;
7665 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7666 let channel_value_satoshis = Readable::read(reader)?;
7668 let latest_monitor_update_id = Readable::read(reader)?;
7670 let mut keys_data = None;
7672 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7673 // the `channel_keys_id` TLV is present below.
7674 let keys_len: u32 = Readable::read(reader)?;
7675 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7676 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7677 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7678 let mut data = [0; 1024];
7679 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7680 reader.read_exact(read_slice)?;
7681 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
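		// Hedged illustration of the allocation bound above: even if a
		// corrupted stream claims a multi-gigabyte key length, the up-front
		// reservation is capped at MAX_ALLOC_SIZE and each iteration of the
		// loop reads at most 1KB.
		#[cfg(debug_assertions)]
		{
			let claimed_len = u32::MAX as usize; // adversarial length from the wire
			debug_assert_eq!(cmp::min(claimed_len, MAX_ALLOC_SIZE), MAX_ALLOC_SIZE);
		}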
7685 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7686 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7687 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7690 let destination_script = Readable::read(reader)?;
7692 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7693 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7694 let value_to_self_msat = Readable::read(reader)?;
7696 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7698 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7699 for _ in 0..pending_inbound_htlc_count {
7700 pending_inbound_htlcs.push(InboundHTLCOutput {
7701 htlc_id: Readable::read(reader)?,
7702 amount_msat: Readable::read(reader)?,
7703 cltv_expiry: Readable::read(reader)?,
7704 payment_hash: Readable::read(reader)?,
7705 state: match <u8 as Readable>::read(reader)? {
7706 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7707 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7708 3 => InboundHTLCState::Committed,
7709 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7710 _ => return Err(DecodeError::InvalidValue),
7715 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7716 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7717 for _ in 0..pending_outbound_htlc_count {
7718 pending_outbound_htlcs.push(OutboundHTLCOutput {
7719 htlc_id: Readable::read(reader)?,
7720 amount_msat: Readable::read(reader)?,
7721 cltv_expiry: Readable::read(reader)?,
7722 payment_hash: Readable::read(reader)?,
7723 source: Readable::read(reader)?,
7724 state: match <u8 as Readable>::read(reader)? {
7725 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7726 1 => OutboundHTLCState::Committed,
7728 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7729 OutboundHTLCState::RemoteRemoved(option.into())
7732 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7733 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7736 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7737 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7739 _ => return Err(DecodeError::InvalidValue),
7741 skimmed_fee_msat: None,
7742 blinding_point: None,
7746 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7747 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7748 for _ in 0..holding_cell_htlc_update_count {
7749 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7750 0 => HTLCUpdateAwaitingACK::AddHTLC {
7751 amount_msat: Readable::read(reader)?,
7752 cltv_expiry: Readable::read(reader)?,
7753 payment_hash: Readable::read(reader)?,
7754 source: Readable::read(reader)?,
7755 onion_routing_packet: Readable::read(reader)?,
7756 skimmed_fee_msat: None,
7757 blinding_point: None,
7759 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7760 payment_preimage: Readable::read(reader)?,
7761 htlc_id: Readable::read(reader)?,
7763 2 => HTLCUpdateAwaitingACK::FailHTLC {
7764 htlc_id: Readable::read(reader)?,
7765 err_packet: Readable::read(reader)?,
7767 _ => return Err(DecodeError::InvalidValue),
7771 let resend_order = match <u8 as Readable>::read(reader)? {
7772 0 => RAACommitmentOrder::CommitmentFirst,
7773 1 => RAACommitmentOrder::RevokeAndACKFirst,
7774 _ => return Err(DecodeError::InvalidValue),
7777 let monitor_pending_channel_ready = Readable::read(reader)?;
7778 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7779 let monitor_pending_commitment_signed = Readable::read(reader)?;
7781 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7782 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7783 for _ in 0..monitor_pending_forwards_count {
7784 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7787 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7788 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7789 for _ in 0..monitor_pending_failures_count {
7790 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7793 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7795 let holding_cell_update_fee = Readable::read(reader)?;
7797 let next_holder_htlc_id = Readable::read(reader)?;
7798 let next_counterparty_htlc_id = Readable::read(reader)?;
7799 let update_time_counter = Readable::read(reader)?;
7800 let feerate_per_kw = Readable::read(reader)?;
7802 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7803 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7804 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7805 // consider the stale state on reload.
7806 match <u8 as Readable>::read(reader)? {
7809 let _: u32 = Readable::read(reader)?;
7810 let _: u64 = Readable::read(reader)?;
7811 let _: Signature = Readable::read(reader)?;
7813 _ => return Err(DecodeError::InvalidValue),
7816 let funding_tx_confirmed_in = Readable::read(reader)?;
7817 let funding_tx_confirmation_height = Readable::read(reader)?;
7818 let short_channel_id = Readable::read(reader)?;
7820 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7821 let holder_dust_limit_satoshis = Readable::read(reader)?;
7822 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7823 let mut counterparty_selected_channel_reserve_satoshis = None;
7825 // Read the old serialization from version 0.0.98.
7826 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7828 // Read the 8 bytes of backwards-compatibility data.
7829 let _dummy: u64 = Readable::read(reader)?;
7831 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7832 let holder_htlc_minimum_msat = Readable::read(reader)?;
7833 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7835 let mut minimum_depth = None;
7837 // Read the old serialization from version 0.0.98.
7838 minimum_depth = Some(Readable::read(reader)?);
7840 // Read the 4 bytes of backwards-compatibility data.
7841 let _dummy: u32 = Readable::read(reader)?;
7844 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7846 1 => Some(CounterpartyForwardingInfo {
7847 fee_base_msat: Readable::read(reader)?,
7848 fee_proportional_millionths: Readable::read(reader)?,
7849 cltv_expiry_delta: Readable::read(reader)?,
7851 _ => return Err(DecodeError::InvalidValue),
7854 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7855 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7857 let counterparty_cur_commitment_point = Readable::read(reader)?;
7859 let counterparty_prev_commitment_point = Readable::read(reader)?;
7860 let counterparty_node_id = Readable::read(reader)?;
7862 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7863 let commitment_secrets = Readable::read(reader)?;
7865 let channel_update_status = Readable::read(reader)?;
7867 #[cfg(any(test, fuzzing))]
7868 let mut historical_inbound_htlc_fulfills = HashSet::new();
7869 #[cfg(any(test, fuzzing))]
7871 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7872 for _ in 0..htlc_fulfills_len {
7873 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7877 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7878 Some((feerate, if channel_parameters.is_outbound_from_holder {
7879 FeeUpdateState::Outbound
7881 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7887 let mut announcement_sigs = None;
7888 let mut target_closing_feerate_sats_per_kw = None;
7889 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7890 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7891 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7892 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7893 // only, so we default to that if none was written.
7894 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7895 let mut channel_creation_height = Some(serialized_height);
7896 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7898 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7899 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7900 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7901 let mut latest_inbound_scid_alias = None;
7902 let mut outbound_scid_alias = None;
7903 let mut channel_pending_event_emitted = None;
7904 let mut channel_ready_event_emitted = None;
7906 let mut user_id_high_opt: Option<u64> = None;
7907 let mut channel_keys_id: Option<[u8; 32]> = None;
7908 let mut temporary_channel_id: Option<ChannelId> = None;
7909 let mut holder_max_accepted_htlcs: Option<u16> = None;
7911 let mut blocked_monitor_updates = Some(Vec::new());
7913 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7914 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7916 let mut is_batch_funding: Option<()> = None;
7918 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7919 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7921 read_tlv_fields!(reader, {
7922 (0, announcement_sigs, option),
7923 (1, minimum_depth, option),
7924 (2, channel_type, option),
7925 (3, counterparty_selected_channel_reserve_satoshis, option),
7926 (4, holder_selected_channel_reserve_satoshis, option),
7927 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7928 (6, holder_max_htlc_value_in_flight_msat, option),
7929 (7, shutdown_scriptpubkey, option),
7930 (8, blocked_monitor_updates, optional_vec),
7931 (9, target_closing_feerate_sats_per_kw, option),
7932 (11, monitor_pending_finalized_fulfills, optional_vec),
7933 (13, channel_creation_height, option),
7934 (15, preimages_opt, optional_vec),
7935 (17, announcement_sigs_state, option),
7936 (19, latest_inbound_scid_alias, option),
7937 (21, outbound_scid_alias, option),
7938 (23, channel_ready_event_emitted, option),
7939 (25, user_id_high_opt, option),
7940 (27, channel_keys_id, option),
7941 (28, holder_max_accepted_htlcs, option),
7942 (29, temporary_channel_id, option),
7943 (31, channel_pending_event_emitted, option),
7944 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7945 (37, holding_cell_skimmed_fees_opt, optional_vec),
7946 (38, is_batch_funding, option),
7947 (39, pending_outbound_blinding_points_opt, optional_vec),
7948 (41, holding_cell_blinding_points_opt, optional_vec),
7951 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7952 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7953 // If we've gotten to the funding stage of the channel, populate the signer with its
7954 // required channel parameters.
7955 if channel_state >= ChannelState::FundingNegotiated {
7956 holder_signer.provide_channel_parameters(&channel_parameters);
7958 (channel_keys_id, holder_signer)
7960 // `keys_data` can be `None` if we had corrupted data.
7961 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7962 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7963 (holder_signer.channel_keys_id(), holder_signer)
7966 if let Some(preimages) = preimages_opt {
7967 let mut iter = preimages.into_iter();
7968 for htlc in pending_outbound_htlcs.iter_mut() {
7970 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7971 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7973 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7974 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7979 // We expect all preimages to be consumed above
7980 if iter.next().is_some() {
7981 return Err(DecodeError::InvalidValue);
7985 let chan_features = channel_type.as_ref().unwrap();
7986 if !chan_features.is_subset(our_supported_features) {
7987 // If the channel was written by a new version and negotiated with features we don't
7988 // understand yet, refuse to read it.
7989 return Err(DecodeError::UnknownRequiredFeature);
7992 // ChannelTransactionParameters may have had an empty features set upon deserialization.
7993 // To account for that, we're proactively setting/overriding the field here.
7994 channel_parameters.channel_type_features = chan_features.clone();
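		// Hedged illustration of the subset rule enforced above: a stored
		// channel type is readable only if every feature bit it sets is also
		// in our supported set; e.g. plain static_remote_key is a subset of
		// the anchors-zero-fee set, which depends on it.
		#[cfg(debug_assertions)]
		{
			let ours = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
			debug_assert!(ChannelTypeFeatures::only_static_remote_key().is_subset(&ours));
		}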
7996 let mut secp_ctx = Secp256k1::new();
7997 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7999 // `user_id` used to be a single u64 value. In order to remain backwards
8000 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8001 // separate u64 values.
8002 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
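		// Hedged sanity sketch of the recombination above: splitting a u128
		// into low/high u64 halves and re-joining them is lossless. The
		// constant is an arbitrary example value.
		#[cfg(debug_assertions)]
		{
			let example: u128 = 0x0123_4567_89ab_cdef_0011_2233_4455_6677;
			let (low, high) = (example as u64, (example >> 64) as u64);
			debug_assert_eq!((low as u128) | ((high as u128) << 64), example);
		}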
8004 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8006 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8007 let mut iter = skimmed_fees.into_iter();
8008 for htlc in pending_outbound_htlcs.iter_mut() {
8009 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8011 // We expect all skimmed fees to be consumed above
8012 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8014 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8015 let mut iter = skimmed_fees.into_iter();
8016 for htlc in holding_cell_htlc_updates.iter_mut() {
8017 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8018 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8021 // We expect all skimmed fees to be consumed above
8022 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8024 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8025 let mut iter = blinding_pts.into_iter();
8026 for htlc in pending_outbound_htlcs.iter_mut() {
8027 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8029 // We expect all blinding points to be consumed above
8030 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8032 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8033 let mut iter = blinding_pts.into_iter();
8034 for htlc in holding_cell_htlc_updates.iter_mut() {
8035 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8036 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8039 // We expect all blinding points to be consumed above
8040 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8044 context: ChannelContext {
8047 config: config.unwrap(),
8051 // Note that we don't care about serializing handshake limits as we only ever serialize
8052 // channel data after the handshake has completed.
8053 inbound_handshake_limits_override: None,
8056 temporary_channel_id,
8058 announcement_sigs_state: announcement_sigs_state.unwrap(),
8060 channel_value_satoshis,
8062 latest_monitor_update_id,
8064 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8065 shutdown_scriptpubkey,
8068 cur_holder_commitment_transaction_number,
8069 cur_counterparty_commitment_transaction_number,
8072 holder_max_accepted_htlcs,
8073 pending_inbound_htlcs,
8074 pending_outbound_htlcs,
8075 holding_cell_htlc_updates,
8079 monitor_pending_channel_ready,
8080 monitor_pending_revoke_and_ack,
8081 monitor_pending_commitment_signed,
8082 monitor_pending_forwards,
8083 monitor_pending_failures,
8084 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8086 signer_pending_commitment_update: false,
8087 signer_pending_funding: false,
8090 holding_cell_update_fee,
8091 next_holder_htlc_id,
8092 next_counterparty_htlc_id,
8093 update_time_counter,
8096 #[cfg(debug_assertions)]
8097 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8098 #[cfg(debug_assertions)]
8099 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8101 last_sent_closing_fee: None,
8102 pending_counterparty_closing_signed: None,
8103 expecting_peer_commitment_signed: false,
8104 closing_fee_limits: None,
8105 target_closing_feerate_sats_per_kw,
8107 funding_tx_confirmed_in,
8108 funding_tx_confirmation_height,
8110 channel_creation_height: channel_creation_height.unwrap(),
8112 counterparty_dust_limit_satoshis,
8113 holder_dust_limit_satoshis,
8114 counterparty_max_htlc_value_in_flight_msat,
8115 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8116 counterparty_selected_channel_reserve_satoshis,
8117 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8118 counterparty_htlc_minimum_msat,
8119 holder_htlc_minimum_msat,
8120 counterparty_max_accepted_htlcs,
8123 counterparty_forwarding_info,
8125 channel_transaction_parameters: channel_parameters,
8126 funding_transaction,
8129 counterparty_cur_commitment_point,
8130 counterparty_prev_commitment_point,
8131 counterparty_node_id,
8133 counterparty_shutdown_scriptpubkey,
8137 channel_update_status,
8138 closing_signed_in_flight: false,
8142 #[cfg(any(test, fuzzing))]
8143 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8144 #[cfg(any(test, fuzzing))]
8145 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8147 workaround_lnd_bug_4006: None,
8148 sent_message_awaiting_response: None,
8150 latest_inbound_scid_alias,
8151 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8152 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8154 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8155 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8157 #[cfg(any(test, fuzzing))]
8158 historical_inbound_htlc_fulfills,
8160 channel_type: channel_type.unwrap(),
8163 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8172 use bitcoin::blockdata::constants::ChainHash;
8173 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8174 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8175 use bitcoin::blockdata::opcodes;
8176 use bitcoin::network::constants::Network;
8177 use crate::ln::{PaymentHash, PaymentPreimage};
8178 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8179 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8180 use crate::ln::channel::InitFeatures;
8181 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8182 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8183 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8184 use crate::ln::msgs;
8185 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8186 use crate::ln::script::ShutdownScript;
8187 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8188 use crate::chain::BestBlock;
8189 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8190 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8191 use crate::chain::transaction::OutPoint;
8192 use crate::routing::router::{Path, RouteHop};
8193 use crate::util::config::UserConfig;
8194 use crate::util::errors::APIError;
8195 use crate::util::ser::{ReadableArgs, Writeable};
8196 use crate::util::test_utils;
8197 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8198 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8199 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8200 use bitcoin::secp256k1::{SecretKey,PublicKey};
8201 use bitcoin::hashes::sha256::Hash as Sha256;
8202 use bitcoin::hashes::Hash;
8203 use bitcoin::hashes::hex::FromHex;
8204 use bitcoin::hash_types::WPubkeyHash;
8205 use bitcoin::blockdata::locktime::absolute::LockTime;
8206 use bitcoin::address::{WitnessProgram, WitnessVersion};
8207 use crate::prelude::*;
8209 struct TestFeeEstimator {
8212 impl FeeEstimator for TestFeeEstimator {
8213 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8219 fn test_max_funding_satoshis_no_wumbo() {
8220 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8221 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8222 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8226 signer: InMemorySigner,
8229 impl EntropySource for Keys {
8230 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8233 impl SignerProvider for Keys {
8234 type EcdsaSigner = InMemorySigner;
8236 type TaprootSigner = InMemorySigner;
8238 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8239 self.signer.channel_keys_id()
8242 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8246 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8248 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8249 let secp_ctx = Secp256k1::signing_only();
8250 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8251 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8252 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8255 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8256 let secp_ctx = Secp256k1::signing_only();
8257 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8258 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8262 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8263 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8264 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8268 fn upfront_shutdown_script_incompatibility() {
8269 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8270 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8271 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8274 let seed = [42; 32];
8275 let network = Network::Testnet;
8276 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8277 keys_provider.expect(OnGetShutdownScriptpubkey {
8278 returns: non_v0_segwit_shutdown_script.clone(),
8281 let secp_ctx = Secp256k1::new();
8282 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8283 let config = UserConfig::default();
8284 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8285 Err(APIError::IncompatibleShutdownScript { script }) => {
8286 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8288 Err(e) => panic!("Unexpected error: {:?}", e),
8289 Ok(_) => panic!("Expected error"),
8293 // Check that, during channel creation, we use the same feerate in the open channel message
8294 // as we do in the Channel object creation itself.
8296 fn test_open_channel_msg_fee() {
8297 let original_fee = 253;
8298 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8299 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8300 let secp_ctx = Secp256k1::new();
8301 let seed = [42; 32];
8302 let network = Network::Testnet;
8303 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8305 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8306 let config = UserConfig::default();
8307 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8309 // Now change the fee so we can check that the fee in the open_channel message is the
8310 // same as the old fee.
8311 fee_est.fee_est = 500;
8312 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8313 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8317 fn test_holder_vs_counterparty_dust_limit() {
8318 // Test that when calculating the local and remote commitment transaction fees, the correct
8319 // dust limits are used.
8320 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8321 let secp_ctx = Secp256k1::new();
8322 let seed = [42; 32];
8323 let network = Network::Testnet;
8324 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8325 let logger = test_utils::TestLogger::new();
8326 let best_block = BestBlock::from_network(network);
8328 // Go through the flow of opening a channel between two nodes, making sure
8329 // they have different dust limits.
8331 // Create Node A's channel pointing to Node B's pubkey
8332 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8333 let config = UserConfig::default();
8334 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8336 // Create Node B's channel by receiving Node A's open_channel message
8337 // Make sure A's dust limit is as we expect.
8338 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8339 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8340 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8342 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8343 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8344 accept_channel_msg.dust_limit_satoshis = 546;
8345 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8346 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8348 // Node A --> Node B: funding created
8349 let output_script = node_a_chan.context.get_funding_redeemscript();
8350 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8351 value: 10000000, script_pubkey: output_script.clone(),
8353 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8354 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8355 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8357 // Node B --> Node A: funding signed
8358 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8359 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8361 // Put some inbound and outbound HTLCs in A's channel.
8362 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8363 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8365 amount_msat: htlc_amount_msat,
8366 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8367 cltv_expiry: 300000000,
8368 state: InboundHTLCState::Committed,
8371 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8373 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8374 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8375 cltv_expiry: 200000000,
8376 state: OutboundHTLCState::Committed,
8377 source: HTLCSource::OutboundRoute {
8378 path: Path { hops: Vec::new(), blinded_tail: None },
8379 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8380 first_hop_htlc_msat: 548,
8381 payment_id: PaymentId([42; 32]),
8383 skimmed_fee_msat: None,
8384 blinding_point: None,
8387 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8388 // the dust limit check.
8389 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8390 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8391 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8392 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8394 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8395 // of the HTLCs are seen to be above the dust limit.
8396 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8397 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8398 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8399 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8400 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8404 fn test_timeout_vs_success_htlc_dust_limit() {
8405 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8406 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8407 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8408 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
		let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());

		// If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
		// counted as dust when it shouldn't be.
		let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		chan.context.channel_transaction_parameters.is_outbound_from_holder = false;

		// If swapped: this HTLC would be counted as non-dust when it shouldn't be.
		let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);

		// If swapped: this HTLC would be counted as dust when it shouldn't be.
		let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
		let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
		let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
		assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
	}
	#[test]
	fn channel_reestablish_no_updates() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Go through the flow of opening a channel between two nodes.

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Now disconnect the two nodes and check that the commitment point in
		// Node B's channel_reestablish message is sane.
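		// For reference: `next_local_commitment_number` (the wire field now named
		// `next_commitment_number`) is the number of the next commitment_signed a node
		// expects to *receive*, and `next_remote_commitment_number` (`next_revocation_number`)
		// is the number of the next revoke_and_ack it expects. Right after funding, only
		// commitment 0 exists on each side and nothing has been revoked, so we expect
		// 1 and 0 respectively, with an all-zero `your_last_per_commitment_secret`.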
		assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_b_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);

		// Check that the commitment point in Node A's channel_reestablish message
		// is sane.
		assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
		let msg = node_a_chan.get_channel_reestablish(&&logger);
		assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
		assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
		assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
	}
	#[test]
	fn test_configured_holder_max_htlc_value_in_flight() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut config_2_percent = UserConfig::default();
		config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
		let mut config_99_percent = UserConfig::default();
		config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
		let mut config_0_percent = UserConfig::default();
		config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
		let mut config_101_percent = UserConfig::default();
		config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;

		// Test that `OutboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
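		// Worked example: a 10_000_000 sat channel is 10_000_000_000 msat, so a 2%
		// configuration should yield a holder_max_htlc_value_in_flight_msat of
		// 10_000_000_000 * 0.02 = 200_000_000 msat, and 99% yields 9_900_000_000 msat.
		// Out-of-range settings are clamped to the [1, 100] percent range below.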
		let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
		let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
		assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
		let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
		assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);

		let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));

		// Test that `InboundV1Channel::new` creates a channel with the correct value for
		// `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
		// which is set to the lower bound + 1 (2%) of the `channel_value`.
		let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
		assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);

		// Test with the upper bound - 1 of valid values (99%).
		let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
		assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);

		// Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
		let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
		assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);

		// Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
		let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
		assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);

		// Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
		// if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
		let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
		assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);

		// Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
		// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
		// than 100.
		let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
		let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
		assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
	}
	#[test]
	fn test_configured_holder_selected_channel_reserve_satoshis() {

		// Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
		// channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
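		// For orientation: `their_channel_reserve_proportional_millionths` is expressed in
		// millionths of the channel value, so a 2% reserve is 20_000 millionths and, on a
		// 10_000_000 sat channel, works out to 10_000_000 * 20_000 / 1_000_000 = 200_000 sats.
		// The result is floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS`, which the 100_000 sat
		// case below is intended to exercise.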
		test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

		// Test with valid but unreasonably high channel reserves
		// Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
		test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

		// Test with calculated channel reserve less than lower bound
		// i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
		test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);

		// Test with invalid channel reserves since sum of both is greater than or equal
		// to channel value
		test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
		test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
	}
	fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
		let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

		let mut outbound_node_config = UserConfig::default();
		outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
		let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

		let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
		assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

		let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
		let mut inbound_node_config = UserConfig::default();
		inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

		if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
			let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

			let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

			assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
			assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
		} else {
			// Channel Negotiations failed
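			// (When the two reserves sum to 100% or more of the channel value, neither side
			// could ever hold a spendable balance above its reserve, so the inbound side
			// should reject the open_channel outright.)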
			let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
			assert!(result.is_err());
		}
	}
	#[test]
	fn channel_update() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		// Create Node A's channel pointing to Node B's pubkey
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		// Create Node B's channel by receiving Node A's open_channel message
		// Make sure A's dust limit is as we expect.
		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

		// Node B --> Node A: accept channel, explicitly setting B's dust limit.
		let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
		accept_channel_msg.dust_limit_satoshis = 546;
		node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
		node_a_chan.context.holder_dust_limit_satoshis = 1560;

		// Node A --> Node B: funding created
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
			value: 10000000, script_pubkey: output_script.clone(),
		}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
		let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

		// Node B --> Node A: funding signed
		let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

		// Make sure that receiving a channel update will update the Channel as expected.
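		// `channel_update` records the counterparty's forwarding parameters (cltv_expiry_delta
		// and fees) from a gossip update for this channel and returns whether anything was
		// actually changed, so the first application below should return true and the
		// identical re-application at the end should return false.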
		let update = ChannelUpdate {
			contents: UnsignedChannelUpdate {
				chain_hash,
				short_channel_id: 0,
				timestamp: 0,
				flags: 0,
				cltv_expiry_delta: 100,
				htlc_minimum_msat: 5,
				htlc_maximum_msat: MAX_VALUE_MSAT,
				fee_base_msat: 110,
				fee_proportional_millionths: 11,
				excess_data: Vec::new(),
			},
			signature: Signature::from(unsafe { FFISignature::new() })
		};
		assert!(node_a_chan.channel_update(&update).unwrap());

		// The counterparty can send an update with a higher minimum HTLC, but that shouldn't
		// change our official htlc_minimum_msat.
		assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
		match node_a_chan.context.counterparty_forwarding_info() {
			Some(info) => {
				assert_eq!(info.cltv_expiry_delta, 100);
				assert_eq!(info.fee_base_msat, 110);
				assert_eq!(info.fee_proportional_millionths, 11);
			},
			None => panic!("expected counterparty forwarding info to be Some")
		}

		assert!(!node_a_chan.channel_update(&update).unwrap());
	}
	#[test]
	fn blinding_point_skimmed_fee_ser() {
		// Ensure that channel blinding points and skimmed fees are (de)serialized properly.
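		// Blinding points and skimmed fees ride along as optional TLV fields in the
		// channel's serialization (kept positionally aligned with the pending and
		// holding-cell HTLC lists), so the round-trip below checks that the mix of
		// `None`s and `Some`s set up here survives encode/decode unchanged.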
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let features = channelmanager::provided_init_features(&config);
		let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
		let mut chan = Channel { context: outbound_chan.context };

		let dummy_htlc_source = HTLCSource::OutboundRoute {
			path: Path {
				hops: vec![RouteHop {
					pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
					node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
					cltv_expiry_delta: 0, maybe_announced_channel: false,
				}],
				blinded_tail: None
			},
			session_priv: test_utils::privkey(42),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([42; 32]),
		};
		let dummy_outbound_output = OutboundHTLCOutput {
			htlc_id: 0,
			amount_msat: 0,
			payment_hash: PaymentHash([43; 32]),
			cltv_expiry: 0,
			state: OutboundHTLCState::Committed,
			source: dummy_htlc_source.clone(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
		for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
			if idx % 2 == 0 {
				htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
			}
			if idx % 3 == 0 {
				htlc.skimmed_fee_msat = Some(1);
			}
		}
		chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

		let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
			amount_msat: 0,
			cltv_expiry: 0,
			payment_hash: PaymentHash([43; 32]),
			source: dummy_htlc_source.clone(),
			onion_routing_packet: msgs::OnionPacket {
				version: 0,
				public_key: Ok(test_utils::pubkey(1)),
				hop_data: [0; 20*65],
				hmac: [0; 32],
			},
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
			payment_preimage: PaymentPreimage([42; 32]),
			htlc_id: 0,
		};
		let mut holding_cell_htlc_updates = Vec::with_capacity(10);
		for i in 0..10 {
			if i % 3 == 0 {
				holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
			} else if i % 3 == 1 {
				holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
			} else {
				let mut dummy_add = dummy_holding_cell_add_htlc.clone();
				if let HTLCUpdateAwaitingACK::AddHTLC {
					ref mut blinding_point, ref mut skimmed_fee_msat, ..
				} = &mut dummy_add {
					*blinding_point = Some(test_utils::pubkey(42 + i));
					*skimmed_fee_msat = Some(42);
				} else { panic!() }
				holding_cell_htlc_updates.push(dummy_add);
			}
		}
		chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

		// Encode and decode the channel and ensure that the HTLCs within are the same.
		let encoded_chan = chan.encode();
		let mut s = crate::io::Cursor::new(&encoded_chan);
		let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
		let features = channelmanager::provided_channel_type_features(&config);
		let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
		assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
		assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
	}
	#[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
	#[test]
	fn outbound_commitment_test() {
		use bitcoin::sighash;
		use bitcoin::consensus::encode::serialize;
		use bitcoin::sighash::EcdsaSighashType;
		use bitcoin::hashes::hex::FromHex;
		use bitcoin::hash_types::Txid;
		use bitcoin::secp256k1::Message;
		use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
		use crate::ln::PaymentPreimage;
		use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
		use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
		use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
		use crate::util::logger::Logger;
		use crate::sync::Arc;
		use core::str::FromStr;
		use hex::DisplayHex;

		// Test vectors from BOLT 3 Appendices C and F (anchors):
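		// These vectors pin down byte-for-byte commitment and HTLC transactions for a
		// fixed funding outpoint, key set, and series of feerates, so any regression in
		// fee rounding, output ordering, or witness construction in the signer or
		// chan_utils shows up as a mismatch against the hex blobs below.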
		let feeest = TestFeeEstimator{fee_est: 15000};
		let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
		let secp_ctx = Secp256k1::new();

		let mut signer = InMemorySigner::new(
			&secp_ctx,
			SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

			// These aren't set in the test vectors:
			[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
			10_000_000,
			[0; 32],
			[0; 32],
		);

		assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
		let keys_provider = Keys { signer: signer.clone() };

		let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut config = UserConfig::default();
		config.channel_handshake_config.announced_channel = false;
		let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel

		let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

		let counterparty_pubkeys = ChannelPublicKeys {
			funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
			revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
			payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
			delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
			htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
		};
		chan.context.channel_transaction_parameters.counterparty_parameters = Some(
			CounterpartyChannelTransactionParameters {
				pubkeys: counterparty_pubkeys.clone(),
				selected_contest_delay: 144
			});
		chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
		signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

		assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
				<Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

		assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
				<Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

		// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
		// derived from a commitment_seed, so instead we copy it here and call
		// build_commitment_transaction.
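		// (Per BOLT 3, the per-commitment point is simply per_commitment_secret * G, and
		// `TxCreationKeys::derive_new` tweaks each basepoint B into a per-commitment key
		// B + SHA256(per_commitment_point || B) * G, except the revocation key which uses
		// its own two-sided blinding construction.)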
		let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
		let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
		macro_rules! test_commitment {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
			};
		}

		macro_rules! test_commitment_with_anchors {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
				chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
				test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
			};
		}

		macro_rules! test_commitment_common {
			( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
				$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
			} ) => { {
				let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
					let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

					let htlcs = commitment_stats.htlcs_included.drain(..)
						.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
						.collect();
					(commitment_stats.tx, htlcs)
				};
				let trusted_tx = commitment_tx.trust();
				let unsigned_tx = trusted_tx.built_transaction();
				let redeemscript = chan.context.get_funding_redeemscript();
				let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
				let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
				log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
				assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

				let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
				per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
				let mut counterparty_htlc_sigs = Vec::new();
				counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
				$({
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
					per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
					counterparty_htlc_sigs.push(remote_signature);
				})*
				assert_eq!(htlcs.len(), per_htlc.len());

				let holder_commitment_tx = HolderCommitmentTransaction::new(
					commitment_tx.clone(),
					counterparty_signature,
					counterparty_htlc_sigs,
					&chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
					chan.context.counterparty_funding_pubkey()
				);
				let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
				assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

				let funding_redeemscript = chan.context.get_funding_redeemscript();
				let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
				assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

				// ((htlc, counterparty_sig), (index, holder_sig))
				let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

				$({
					log_trace!(logger, "verifying htlc {}", $htlc_idx);
					let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

					let ref htlc = htlcs[$htlc_idx];
					let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
						chan.context.get_counterparty_selected_contest_delay().unwrap(),
						&htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
					let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
					let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
					let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
					assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
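					// (On anchor channels the counterparty signs HTLC transactions with
					// SIGHASH_SINGLE | SIGHASH_ANYONECANPAY so the holder can attach extra
					// fee inputs and outputs later; non-anchor channels use plain SIGHASH_ALL.)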
					let mut preimage: Option<PaymentPreimage> = None;
					if !htlc.offered {
						for i in 0..5 {
							let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
							if out == htlc.payment_hash {
								preimage = Some(PaymentPreimage([i; 32]));
							}
						}

						assert!(preimage.is_some());
					}

					let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
					let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
						channel_derivation_parameters: ChannelDerivationParameters {
							value_satoshis: chan.context.channel_value_satoshis,
							keys_id: chan.context.channel_keys_id,
							transaction_parameters: chan.context.channel_transaction_parameters.clone(),
						},
						commitment_txid: trusted_tx.txid(),
						per_commitment_number: trusted_tx.commitment_number(),
						per_commitment_point: trusted_tx.per_commitment_point(),
						feerate_per_kw: trusted_tx.feerate_per_kw(),
						htlc: htlc.clone(),
						preimage: preimage.clone(),
						counterparty_sig: *htlc_counterparty_sig,
					}, &secp_ctx).unwrap();
					let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
					assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

					let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
					assert_eq!(signature, htlc_holder_sig, "htlc sig");
					let trusted_tx = holder_commitment_tx.trust();
					htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
					log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
					assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
				})*
				assert!(htlc_counterparty_sig_iter.next().is_none());
			} }
		}
		// anchors: simple commitment tx with no HTLCs and single anchor
		test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
		                 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// simple commitment tx with no HTLCs
		chan.context.value_to_self_msat = 7000000000;

		test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
		                 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		// anchors: simple commitment tx with no HTLCs
		test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
		                 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
		                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 0,
				amount_msat: 1000000,
				cltv_expiry: 500,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 2,
				amount_msat: 2000000,
				cltv_expiry: 502,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 3,
				amount_msat: 3000000,
				cltv_expiry: 503,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 4,
				amount_msat: 4000000,
				cltv_expiry: 504,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
			out
		});

		// commitment tx with all five HTLCs untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 0;
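		// At feerate_per_kw = 0 the second-stage HTLC fee is zero, so the trim threshold
		// collapses to the bare 546 sat dust limit; every HTLC above (1_000 to 4_000 sats)
		// clears it and all five appear as outputs in the vector below.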
9092 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9093 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9094 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9097 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9098 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9099 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9102 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9103 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9104 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9107 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9108 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9109 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9112 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9113 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9114 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9117 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9118 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9119 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9122 // commitment tx with seven outputs untrimmed (maximum feerate)
9123 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9124 chan.context.feerate_per_kw = 647;
9126 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9127 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9128 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9131 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9132 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9133 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9136 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9137 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9138 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9141 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9142 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9143 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9146 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9147 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9148 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9151 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9152 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9153 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9156 // commitment tx with six outputs untrimmed (minimum feerate)
9157 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9158 chan.context.feerate_per_kw = 648;
9160 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9161 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9162 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9165 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9166 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9167 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9170 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9171 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9172 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9175 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9176 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9177 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9180 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9181 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9182 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9185 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9186 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9187 chan.context.feerate_per_kw = 645;
9188 chan.context.holder_dust_limit_satoshis = 1001;
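// With anchors_zero_fee_htlc_tx the HTLC claim transactions pay no fee, so
// (reading the BOLT 3 trim rule with a zero HTLC fee) an output is trimmed
// exactly when amount_sat < dust_limit. That is why these anchors vectors
// step the holder dust limit just above successive HTLC amounts (1_001 here,
// then 2_001, 3_001 and 4_001 sat below) to trim the 1_000, 2_000, 3_000 and
// 4_000 sat HTLCs one band at a time, rather than varying the feerate.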
9190 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9191 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9192 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9195 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9196 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9197 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9200 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9201 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9202 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9205 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9206 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9207 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9210 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9211 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9212 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9215 // commitment tx with six outputs untrimmed (maximum feerate)
9216 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9217 chan.context.feerate_per_kw = 2069;
9218 chan.context.holder_dust_limit_satoshis = 546;
9220 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9221 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9222 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9225 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9226 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9227 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9230 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9231 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9232 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9235 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9236 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9237 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9240 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9241 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9242 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9245 // commitment tx with five outputs untrimmed (minimum feerate)
9246 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9247 chan.context.feerate_per_kw = 2070;
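// Boundary check (not part of the upstream vectors): the 2_000 sat received
// HTLC pays an HTLC-success fee of floor(703 * 2070 / 1000) = 1_455, leaving
// 545 < 546, so it is now trimmed; at 2_069 the fee was 1_454, leaving exactly
// the 546 sat dust limit. Hence six outputs at 2_069 above and five at 2_070.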
9249 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9250 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9251 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9254 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9255 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9256 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9259 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9260 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9261 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9264 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9265 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9266 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9269 // commitment tx with five outputs untrimmed (maximum feerate)
9270 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9271 chan.context.feerate_per_kw = 2194;
9273 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9274 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9275 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9278 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9279 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9280 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9283 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9284 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9285 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9288 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9289 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9290 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9293 // commitment tx with four outputs untrimmed (minimum feerate)
9294 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9295 chan.context.feerate_per_kw = 2195;
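// Boundary check: offered HTLCs use the HTLC-timeout weight of 663, so the
// 2_000 sat offered HTLC survives at 2_194 (fee floor(663 * 2194 / 1000) =
// 1_454, leaving 546) and is trimmed at 2_195 (fee 1_455, leaving 545 < 546),
// dropping the commitment from five to four outputs.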
9297 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9298 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9299 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9302 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9303 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9304 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9307 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9308 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9309 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9312 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9313 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9314 chan.context.feerate_per_kw = 2185;
9315 chan.context.holder_dust_limit_satoshis = 2001;
9316 let cached_channel_type = chan.context.channel_type.clone();
9317 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9319 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9320 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9321 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9324 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9325 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9326 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9329 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9330 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9331 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9334 // commitment tx with four outputs untrimmed (maximum feerate)
9335 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9336 chan.context.feerate_per_kw = 3702;
9337 chan.context.holder_dust_limit_satoshis = 546;
9338 chan.context.channel_type = cached_channel_type.clone();
9340 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9341 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9342 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9345 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9346 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9347 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9350 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9351 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9352 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9355 // commitment tx with three outputs untrimmed (minimum feerate)
9356 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9357 chan.context.feerate_per_kw = 3703;
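// Boundary check: the 3_000 sat offered HTLC is kept at 3_702
// (floor(663 * 3702 / 1000) = 2_454, leaving 546) and trimmed at 3_703
// (fee 2_455, leaving 545 < 546), going from four to three outputs.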
9359 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9360 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9361 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9364 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9365 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9366 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9369 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9370 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9371 chan.context.feerate_per_kw = 3687;
9372 chan.context.holder_dust_limit_satoshis = 3001;
9373 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9375 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9376 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9377 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9380 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9381 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9382 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9385 // commitment tx with three outputs untrimmed (maximum feerate)
9386 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9387 chan.context.feerate_per_kw = 4914;
9388 chan.context.holder_dust_limit_satoshis = 546;
9389 chan.context.channel_type = cached_channel_type.clone();
9391 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9392 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9393 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9396 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9397 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9398 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9401 // commitment tx with two outputs untrimmed (minimum feerate)
9402 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9403 chan.context.feerate_per_kw = 4915;
9404 chan.context.holder_dust_limit_satoshis = 546;
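// Boundary check: the last remaining HTLC, the 4_000 sat received one, is
// kept at 4_914 (floor(703 * 4914 / 1000) = 3_454, leaving 546) and trimmed
// at 4_915 (fee 3_455, leaving 545 < 546), so only the two balance outputs
// remain from here on.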
9406 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9407 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9408 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9410 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9411 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9412 chan.context.feerate_per_kw = 4894;
9413 chan.context.holder_dust_limit_satoshis = 4001;
9414 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9416 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9417 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9418 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9420 // commitment tx with two outputs untrimmed (maximum feerate)
9421 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9422 chan.context.feerate_per_kw = 9651180;
9423 chan.context.holder_dust_limit_satoshis = 546;
9424 chan.context.channel_type = cached_channel_type.clone();
9426 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9427 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9428 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9430 // commitment tx with one output untrimmed (minimum feerate)
9431 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9432 chan.context.feerate_per_kw = 9651181;
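// At 9_651_180 the funder's to_local output is exactly 546 sat (the
// 0x0222-valued P2WSH output in the expected transaction above); one more
// sat/kW of commitment fee shaves another satoshi off it, pushing it below
// the dust limit and leaving to_remote as the single untrimmed output.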
9434 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9435 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9436 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9438 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9439 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9440 chan.context.feerate_per_kw = 6216010;
9441 chan.context.holder_dust_limit_satoshis = 4001;
9442 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9444 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9445 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9446 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9448 // commitment tx with fee greater than funder amount
9449 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9450 chan.context.feerate_per_kw = 9651936;
9451 chan.context.holder_dust_limit_satoshis = 546;
9452 chan.context.channel_type = cached_channel_type;
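// Note: at this feerate the nominal commitment fee would exceed what the
// funder's balance can cover, so the fee saturates at the funder's available
// funds; the expected transaction and both signatures below are byte-identical
// to the one-output minimum-feerate case above.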
9454 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9455 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9456 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9458 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9459 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9460 chan.context.feerate_per_kw = 253;
9461 chan.context.pending_inbound_htlcs.clear();
9462 chan.context.pending_inbound_htlcs.push({
9463 let mut out = InboundHTLCOutput{
9464 htlc_id: 1,
9465 amount_msat: 2000000,
9466 cltv_expiry: 501,
9467 payment_hash: PaymentHash([0; 32]),
9468 state: InboundHTLCState::Committed,
9469 };
9470 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9471 out
9472 });
9473 chan.context.pending_outbound_htlcs.clear();
9474 chan.context.pending_outbound_htlcs.push({
9475 let mut out = OutboundHTLCOutput{
9476 htlc_id: 6,
9477 amount_msat: 5000001,
9478 cltv_expiry: 506,
9479 payment_hash: PaymentHash([0; 32]),
9480 state: OutboundHTLCState::Committed,
9481 source: HTLCSource::dummy(),
9482 skimmed_fee_msat: None,
9483 blinding_point: None,
9484 };
9485 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9486 out
9487 });
9488 chan.context.pending_outbound_htlcs.push({
9489 let mut out = OutboundHTLCOutput{
9490 htlc_id: 5,
9491 amount_msat: 5000000,
9492 cltv_expiry: 505,
9493 payment_hash: PaymentHash([0; 32]),
9494 state: OutboundHTLCState::Committed,
9495 source: HTLCSource::dummy(),
9496 skimmed_fee_msat: None,
9497 blinding_point: None,
9498 };
9499 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9500 out
9501 });
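// The two offered HTLCs round to the same 5_000 sat output value and, since
// they share a payment hash, have identical output scripts, so value/script
// ordering cannot distinguish them; BOLT 3 breaks the tie by increasing
// cltv_expiry, which is why the two HTLC-timeout transactions below carry
// locktimes 505 and then 506.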
9503 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9504 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9505 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9508 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9509 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9510 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9512 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9513 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9514 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9516 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9517 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9518 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }

	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
	                 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
	                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

	                  { 0,
	                  "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
	                  "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
	                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
	                  { 1,
	                  "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
	                  "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
	                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
	                  { 2,
	                  "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
	                  "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
	                  "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
	} );
}

#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:

	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
	           <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
	           <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
	           <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
	           <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}

#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
	let secp_ctx = Secp256k1::new();

	let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
	           SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

	assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
	           <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
	           SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
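
	// For illustration: per BOLT 3, `derive_private_key` computes
	//   derived_secret = base_secret + SHA256(per_commitment_point || base_point)
	// over the secp256k1 scalar field. The cross-check below is a local sketch
	// using a plain scalar tweak, not an LDK API.
	let tweak = Sha256::hash(&[&per_commitment_point.serialize()[..], &base_point.serialize()[..]].concat());
	let manually_derived = base_secret
		.add_tweak(&secp256k1::Scalar::from_be_bytes(tweak.to_byte_array()).unwrap()).unwrap();
	assert_eq!(manually_derived, chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret));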
}

#[test]
fn test_zero_conf_channel_type_support() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();

	let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(channel_type_features);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
	assert!(res.is_ok());
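
	// Illustrative follow-up (not part of the original check): on success the
	// inbound channel should have adopted the explicitly requested zero-conf
	// channel type.
	let node_b_chan = match res { Ok(chan) => chan, Err(_) => panic!() };
	assert!(node_b_chan.context.channel_type.requires_zero_conf());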
}

#[test]
fn test_supports_anchors_zero_htlc_tx_fee() {
	// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
	// resulting `channel_type`.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let mut config = UserConfig::default();
	config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

	// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
	// need to signal it.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
		&config, 0, 42, None
	).unwrap();
	assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

	let mut expected_channel_type = ChannelTypeFeatures::empty();
	expected_channel_type.set_static_remote_key_required();
	expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	assert_eq!(channel_a.context.channel_type, expected_channel_type);
	assert_eq!(channel_b.context.channel_type, expected_channel_type);
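
	// Extra, illustrative assertion: the negotiated type should match the canned
	// `anchors_zero_htlc_fee_and_dependencies()` constructor used by the
	// commitment-test vectors above, i.e. `static_remote_key` (required) plus
	// `anchors_zero_fee_htlc_tx` (required).
	assert_eq!(expected_channel_type, ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies());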
}

#[test]
fn test_rejects_implicit_simple_anchors() {
	// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
	// each side's `InitFeatures`, it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let raw_init_features = static_remote_key_required | simple_anchors_required;
	let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
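
	// Quick sanity check on the raw encoding just built (illustrative only): bit 12
	// lands in little-endian byte 1 and bit 20 in byte 2, so the first three bytes
	// of the feature vector are [0x00, 0x10, 0x10].
	assert_eq!(raw_init_features.to_le_bytes()[..3], [0x00, 0x10, 0x10]);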

	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	// Set `channel_type` to `None` to force the implicit feature negotiation.
	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = None;

	// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
	// `static_remote_key`, it will fail the channel.
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(channel_b.is_err());
}

#[test]
fn test_rejects_simple_anchors_channel_type() {
	// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
	// it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
	let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	assert!(!simple_anchors_init.requires_unknown_bits());
	assert!(!simple_anchors_channel_type.requires_unknown_bits());

	// First, we'll try to open a channel between A and B where A requests a channel type for
	// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
	// B as it's not supported by LDK.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	let res = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(res.is_err());

	// Then, we'll try to open another channel where A requests a channel type for
	// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
	// original `option_anchors` feature, which should be rejected by A as it's not supported by
	// LDK.
	let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
		10000000, 100000, 42, &config, 0, 42, None
	).unwrap();

	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	let mut accept_channel_msg = channel_b.get_accept_channel_message();
	accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	let res = channel_a.accept_channel(
		&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
	);
	assert!(res.is_err());
}

#[test]
fn test_waiting_for_batch() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let mut config = UserConfig::default();
	// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
	// channel in a batch before all channels are ready.
	config.channel_handshake_limits.trust_own_funding_0conf = true;

	// Create a channel from node a to node b that will be part of batch funding.
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
		&feeest,
		&&keys_provider,
		&&keys_provider,
		node_b_node_id,
		&channelmanager::provided_init_features(&config),
		10000000,
		100000,
		42,
		&config,
		0,
		42,
		None
	).unwrap();

	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
		&feeest,
		&&keys_provider,
		&&keys_provider,
		node_b_node_id,
		&channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config),
		&open_channel_msg,
		7,
		&config,
		0,
		&&logger,
		true, // Allow node b to send a 0conf channel_ready.
	).unwrap();

	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(
		&accept_channel_msg,
		&config.channel_handshake_limits,
		&channelmanager::provided_init_features(&config),
	).unwrap();

	// Fund the channel with a batch funding transaction: two equal-value outputs,
	// one for this channel and one standing in for another channel in the batch.
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction {
		version: 1,
		lock_time: LockTime::ZERO,
		input: Vec::new(),
		output: vec![
			TxOut {
				value: 10000000, script_pubkey: output_script.clone(),
			},
			TxOut {
				value: 10000000, script_pubkey: Builder::new().into_script(),
			},
		]};
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(
		tx.clone(), funding_outpoint, true, &&logger,
	).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
		&funding_created_msg.unwrap(),
		best_block,
		&&keys_provider,
		&&logger,
	).map_err(|_| ()).unwrap();
	let node_b_updates = node_b_chan.monitor_updating_restored(
		&&logger,
		&&keys_provider,
		chain_hash,
		&config,
		0,
	);

	// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
	// broadcasting the funding transaction until the batch is ready.
	let res = node_a_chan.funding_signed(
		&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
	);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
	let node_a_updates = node_a_chan.monitor_updating_restored(
		&&logger,
		&&keys_provider,
		chain_hash,
		&config,
		0,
	);
	// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
	// as the funding transaction depends on all channels in the batch becoming ready.
	assert!(node_a_updates.channel_ready.is_none());
	assert!(node_a_updates.funding_broadcastable.is_none());
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

	// It is possible to receive a 0conf channel_ready from the remote node.
	node_a_chan.channel_ready(
		&node_b_updates.channel_ready.unwrap(),
		&&keys_provider,
		chain_hash,
		&config,
		&best_block,
		&&logger,
	).unwrap();
	assert_eq!(
		node_a_chan.context.channel_state,
		ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
	);

	// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
	node_a_chan.set_batch_ready();
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	assert!(node_a_chan.check_get_channel_ready(0).is_some());