// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;

#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one at a time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
struct InboundHTLCOutput {
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		payment_hash: PaymentHash,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
	},
	FailHTLC {
		err_packet: msgs::OnionErrorPacket,
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

			const $flag: $flag_type = $flag_type($value);

			/// All flags that apply to the specified [`ChannelState`] variant.
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			fn new() -> Self { Self(0) }

			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
				Ok($flag_type(flags))

			fn is_empty(&self) -> bool { self.0 == 0 }

			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }

		impl core::ops::Not for $flag_type {
			fn not(self) -> Self::Output { Self(!self.0) }
		impl core::ops::BitOr for $flag_type {
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		impl core::ops::BitAnd for $flag_type {
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }

	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);

	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
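// As an illustration of how these bits compose (hypothetical value, not one taken from the code
// below): a channel that is awaiting `channel_ready`, has seen the counterparty's `channel_ready`,
// and whose peer is currently disconnected would serialize to
// AWAITING_CHANNEL_READY | THEIR_CHANNEL_READY | PEER_DISCONNECTED = (1 << 3) | (1 << 4) | (1 << 7) = 0x98,
// which `ChannelState::from_u32` further down decodes back into
// `ChannelState::AwaitingChannelReady(THEIR_CHANNEL_READY | PEER_DISCONNECTED)`.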
define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
	]
);
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		fn $get(&self) -> bool {
				ChannelState::$state(flags) => flags.is_set($state_flag.into()),

				ChannelState::$state(flags) => *flags |= $state_flag,
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),

		fn $clear(&mut self) {
				ChannelState::$state(flags) => *flags &= !($state_flag),
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),

	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);

	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}
	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn should_force_holding_cell(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
					flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
					flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "The holding cell is only valid within ChannelReady");
				false
			},
		}
	}
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
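// Illustrative sketch only (hypothetical values, not code from this module): the helpers generated
// by `impl_state_flag!` above are used roughly like this:
//
//     let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
//     state.set_awaiting_remote_revoke();
//     assert!(state.is_awaiting_remote_revoke());
//     state.set_peer_disconnected();
//     assert!(state.should_force_holding_cell());
//
// i.e. new updates are held in the holding cell whenever we are awaiting a `revoke_and_ack`, a
// monitor update is in flight, or the peer is disconnected.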
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
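// Worked example (illustrative arithmetic, not a value used elsewhere in this file): a non-anchor
// commitment transaction carrying three non-dust HTLCs weighs about 724 + 3 * 172 = 1240 weight
// units, so at a feerate of 2500 sat/kW the funder pays roughly 1240 * 2500 / 1000 = 3100 sats in
// commitment fees.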
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
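// For reference: (1 << 24) - 1 = 16_777_215 sats, i.e. just under 0.168 BTC.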
/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
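// A sketch of where 354 sats comes from (our derivation, following Bitcoin Core's dust rules
// rather than anything defined in this file): the largest standard segwit scriptPubKey is 42 bytes
// (an OP_1..OP_16 plus a 40-byte push), making the serialized output 8 + 1 + 42 = 51 bytes, and
// Core assumes roughly 67 vbytes to later spend a segwit output, so at the default 3 sat/vbyte
// dust relay fee the threshold is (51 + 67) * 3 = 354 sats.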
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}
/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs excluded
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs excluded)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees but considering dust limits
	remote_balance_msat: u64, // remote balance before fees but considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self { amount_msat, origin }
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}
/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}
/// The return value of `signer_maybe_unblocked`
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub funding_created: Option<msgs::FundingCreated>,
	pub channel_ready: Option<msgs::ChannelReady>,
}
/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) counterparty_node_id: PublicKey,
}
/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
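// For intuition (hypothetical numbers): if the current feerate is 2_500 sat/kW, an outbound HTLC
// is only added if the funder could still afford the commitment transaction at
// 2_500 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE = 5_000 sat/kW, leaving headroom for one such
// feerate "jump" before the channel risks getting stuck.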
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
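// 2016 blocks is roughly two weeks at one block every ten minutes.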
/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
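// Roughly (a sketch of the intent above, not the exact accounting done elsewhere in this module):
// when we, as the funder, pick a new feerate we budget for the current non-dust HTLC count plus
// this buffer, i.e.
//
//     fee_sat ~ (commitment_tx_base_weight(features)
//                 + (num_nondust_htlcs + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64)
//                   * COMMITMENT_TX_WEIGHT_PER_HTLC)
//               * feerate_per_kw as u64 / 1000;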
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
///   * The expected interval between ticks (1 minute).
///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///     for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
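// i.e. 300 seconds of expected propagation delay / 60 seconds per tick = 5 ticks.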
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
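// At one timer tick per minute, this allows an unfunded channel to sit for roughly an hour.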
/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond after this reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
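// Note on usage (an observation about the method above, not new behavior): because
// `should_expire_unfunded_channel` increments the counter as a side effect, callers should invoke
// it exactly once per timer tick; on the 60th consecutive tick without funding it starts
// returning `true`.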
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// reconnect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.
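	// For example, both counters below start at INITIAL_COMMITMENT_NUMBER (2^48 - 1 =
	// 281474976710655), which corresponds to commitment number 0 in the on-chain,
	// obscured commitment-number encoding.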
	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. I.e. when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,
	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	pub(super) holder_dust_limit_satoshis: u64,
	holder_dust_limit_satoshis: u64,

	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	counterparty_max_htlc_value_in_flight_msat: u64,

	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	pub(super) holder_selected_channel_reserve_satoshis: u64,
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	pub counterparty_max_accepted_htlcs: u16,
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,
	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
	/// Returns the state of the channel in its various stages of shutdown.
	pub fn shutdown_state(&self) -> ChannelShutdownState {
		match self.channel_state {
			ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
				if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
					ChannelShutdownState::ShutdownInitiated
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
					ChannelShutdownState::ResolvingHTLCs
				} else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
					ChannelShutdownState::NegotiatingClosingFee
				} else {
					ChannelShutdownState::NotShuttingDown
				},
			ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
			_ => ChannelShutdownState::NotShuttingDown,
		}
	}
1290 fn closing_negotiation_ready(&self) -> bool {
1291 let is_ready_to_close = match self.channel_state {
1292 ChannelState::AwaitingChannelReady(flags) =>
1293 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1294 ChannelState::ChannelReady(flags) =>
1295 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298 self.pending_inbound_htlcs.is_empty() &&
1299 self.pending_outbound_htlcs.is_empty() &&
1300 self.pending_update_fee.is_none() &&
1304 /// Returns true if this channel is currently available for use. This is a superset of
1305 /// is_usable() and considers things like the channel being temporarily disabled.
1306 /// Allowed in any state (including after shutdown)
1307 pub fn is_live(&self) -> bool {
1308 self.is_usable() && !self.channel_state.is_peer_disconnected()
1311 // Public utilities:
1313 pub fn channel_id(&self) -> ChannelId {
1317 // Return the `temporary_channel_id` used during channel establishment.
1319 // Will return `None` for channels created prior to LDK version 0.0.115.
1320 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1321 self.temporary_channel_id
1324 pub fn minimum_depth(&self) -> Option<u32> {
1328 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1329 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1330 pub fn get_user_id(&self) -> u128 {
1334 /// Gets the channel's type
1335 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1339 /// Gets the channel's `short_channel_id`.
1341 /// Will return `None` if the channel hasn't been confirmed yet.
1342 pub fn get_short_channel_id(&self) -> Option<u64> {
1343 self.short_channel_id
1346 /// Allowed in any state (including after shutdown)
1347 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1348 self.latest_inbound_scid_alias
1351 /// Allowed in any state (including after shutdown)
1352 pub fn outbound_scid_alias(&self) -> u64 {
1353 self.outbound_scid_alias
1356 /// Returns the holder signer for this channel.
1358 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1359 return &self.holder_signer
1362 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1363 /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1364 /// or prior to any channel actions during `Channel` initialization.
1365 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1366 debug_assert_eq!(self.outbound_scid_alias, 0);
1367 self.outbound_scid_alias = outbound_scid_alias;
1370 /// Returns the funding_txo we either got from our peer, or were given by
1371 /// get_funding_created.
1372 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1373 self.channel_transaction_parameters.funding_outpoint
1376 /// Returns the height in which our funding transaction was confirmed.
1377 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1378 let conf_height = self.funding_tx_confirmation_height;
1379 if conf_height > 0 {
1386 /// Returns the block hash in which our funding transaction was confirmed.
1387 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1388 self.funding_tx_confirmed_in
1391 /// Returns the current number of confirmations on the funding transaction.
1392 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1393 if self.funding_tx_confirmation_height == 0 {
1394 // We either haven't seen any confirmation yet, or observed a reorg.
1398 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1401 fn get_holder_selected_contest_delay(&self) -> u16 {
1402 self.channel_transaction_parameters.holder_selected_contest_delay
1405 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1406 &self.channel_transaction_parameters.holder_pubkeys
1409 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1410 self.channel_transaction_parameters.counterparty_parameters
1411 .as_ref().map(|params| params.selected_contest_delay)
1414 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1415 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1418 /// Allowed in any state (including after shutdown)
1419 pub fn get_counterparty_node_id(&self) -> PublicKey {
1420 self.counterparty_node_id
1423 /// Allowed in any state (including after shutdown)
1424 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1425 self.holder_htlc_minimum_msat
1428 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1429 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1430 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1433 /// Allowed in any state (including after shutdown)
1434 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1436 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1437 // to use the full capacity. This is an effort to reduce routing failures, because in many cases
1438 // a channel might have been used to route very small values (either by honest users or as DoS).
1439 self.channel_value_satoshis * 1000 * 9 / 10,
1441 self.counterparty_max_htlc_value_in_flight_msat
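// Illustrative sketch (editor's addition, not upstream LDK code): a standalone helper mirroring
// the 90%-of-capacity cap computed just above. The helper name and the sample figures in the
// comments are hypothetical.
fn example_announced_htlc_max_msat(channel_value_satoshis: u64, counterparty_max_in_flight_msat: u64) -> u64 {
// E.g. a 1_000_000 sat channel yields a 900_000_000 msat upper bound, unless the counterparty
// advertised a smaller max HTLC value in flight.
core::cmp::min(channel_value_satoshis * 1000 * 9 / 10, counterparty_max_in_flight_msat)
}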
1445 /// Allowed in any state (including after shutdown)
1446 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1447 self.counterparty_htlc_minimum_msat
1450 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1451 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1452 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1455 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1456 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1457 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1459 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1460 party_max_htlc_value_in_flight_msat
1465 pub fn get_value_satoshis(&self) -> u64 {
1466 self.channel_value_satoshis
1469 pub fn get_fee_proportional_millionths(&self) -> u32 {
1470 self.config.options.forwarding_fee_proportional_millionths
1473 pub fn get_cltv_expiry_delta(&self) -> u16 {
1474 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1477 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1478 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1479 where F::Target: FeeEstimator
1481 match self.config.options.max_dust_htlc_exposure {
1482 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1483 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1484 ConfirmationTarget::OnChainSweep) as u64;
1485 feerate_per_kw.saturating_mul(multiplier)
1487 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
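// Illustrative sketch (editor's addition, not upstream LDK code): the effective dust exposure
// limit under the `FeeRateMultiplier` variant above. The helper name and example numbers are
// hypothetical.
fn example_fee_rate_multiplier_limit_msat(feerate_per_kw: u64, multiplier: u64) -> u64 {
// E.g. at 2_500 sat/kWU with a multiplier of 10_000 the limit is 25_000_000 msat.
feerate_per_kw.saturating_mul(multiplier)
}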
1491 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1492 pub fn prev_config(&self) -> Option<ChannelConfig> {
1493 self.prev_config.map(|prev_config| prev_config.0)
1496 // Checks whether we should emit a `ChannelPending` event.
1497 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1498 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1501 // Returns whether we already emitted a `ChannelPending` event.
1502 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1503 self.channel_pending_event_emitted
1506 // Remembers that we already emitted a `ChannelPending` event.
1507 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1508 self.channel_pending_event_emitted = true;
1511 // Checks whether we should emit a `ChannelReady` event.
1512 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1513 self.is_usable() && !self.channel_ready_event_emitted
1516 // Remembers that we already emitted a `ChannelReady` event.
1517 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1518 self.channel_ready_event_emitted = true;
1521 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1522 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1523 /// no longer be considered when forwarding HTLCs.
1524 pub fn maybe_expire_prev_config(&mut self) {
1525 if self.prev_config.is_none() {
1528 let prev_config = self.prev_config.as_mut().unwrap();
1530 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1531 self.prev_config = None;
1535 /// Returns the current [`ChannelConfig`] applied to the channel.
1536 pub fn config(&self) -> ChannelConfig {
1540 /// Updates the channel's config. A bool is returned indicating whether the config update
1541 /// applied resulted in a new ChannelUpdate message.
1542 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1543 let did_channel_update =
1544 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1545 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1546 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1547 if did_channel_update {
1548 self.prev_config = Some((self.config.options, 0));
1549 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1550 // policy change to propagate throughout the network.
1551 self.update_time_counter += 1;
1553 self.config.options = *config;
1557 /// Returns true if funding_signed was sent/received and the
1558 /// funding transaction has been broadcast if necessary.
1559 pub fn is_funding_broadcast(&self) -> bool {
1560 !self.channel_state.is_pre_funded_state() &&
1561 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1564 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1565 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1566 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1567 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1568 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1570 /// @local is used only to convert relevant internal structures which refer to remote vs local
1571 /// to decide value of outputs and direction of HTLCs.
1572 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1573 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1574 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1575 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1576 /// which peer generated this transaction and "to whom" this transaction flows.
1578 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1579 where L::Target: Logger
1581 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1582 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1583 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1585 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1586 let mut remote_htlc_total_msat = 0;
1587 let mut local_htlc_total_msat = 0;
1588 let mut value_to_self_msat_offset = 0;
1590 let mut feerate_per_kw = self.feerate_per_kw;
1591 if let Some((feerate, update_state)) = self.pending_update_fee {
1592 if match update_state {
1593 // Note that these match the inclusion criteria when scanning
1594 // pending_inbound_htlcs below.
1595 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1596 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1597 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1599 feerate_per_kw = feerate;
1603 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1604 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1605 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1607 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1609 macro_rules! get_htlc_in_commitment {
1610 ($htlc: expr, $offered: expr) => {
1611 HTLCOutputInCommitment {
1613 amount_msat: $htlc.amount_msat,
1614 cltv_expiry: $htlc.cltv_expiry,
1615 payment_hash: $htlc.payment_hash,
1616 transaction_output_index: None
1621 macro_rules! add_htlc_output {
1622 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1623 if $outbound == local { // "offered HTLC output"
1624 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1625 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1628 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1630 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1631 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1632 included_non_dust_htlcs.push((htlc_in_tx, $source));
1634 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1635 included_dust_htlcs.push((htlc_in_tx, $source));
1638 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1639 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1642 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1644 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1645 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1646 included_non_dust_htlcs.push((htlc_in_tx, $source));
1648 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1649 included_dust_htlcs.push((htlc_in_tx, $source));
1655 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1657 for ref htlc in self.pending_inbound_htlcs.iter() {
1658 let (include, state_name) = match htlc.state {
1659 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1660 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1661 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1662 InboundHTLCState::Committed => (true, "Committed"),
1663 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1667 add_htlc_output!(htlc, false, None, state_name);
1668 remote_htlc_total_msat += htlc.amount_msat;
1670 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1672 &InboundHTLCState::LocalRemoved(ref reason) => {
1673 if generated_by_local {
1674 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1675 inbound_htlc_preimages.push(preimage);
1676 value_to_self_msat_offset += htlc.amount_msat as i64;
1686 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1688 for ref htlc in self.pending_outbound_htlcs.iter() {
1689 let (include, state_name) = match htlc.state {
1690 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1691 OutboundHTLCState::Committed => (true, "Committed"),
1692 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1693 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1694 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1697 let preimage_opt = match htlc.state {
1698 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1699 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1700 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1704 if let Some(preimage) = preimage_opt {
1705 outbound_htlc_preimages.push(preimage);
1709 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1710 local_htlc_total_msat += htlc.amount_msat;
1712 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1714 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1715 value_to_self_msat_offset -= htlc.amount_msat as i64;
1717 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1718 if !generated_by_local {
1719 value_to_self_msat_offset -= htlc.amount_msat as i64;
1727 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1728 assert!(value_to_self_msat >= 0);
1729 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1730 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1731 // "violate" their reserve value by couting those against it. Thus, we have to convert
1732 // everything to i64 before subtracting as otherwise we can overflow.
1733 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1734 assert!(value_to_remote_msat >= 0);
1736 #[cfg(debug_assertions)]
1738 // Make sure that the to_self/to_remote is always either past the appropriate
1739 // channel_reserve *or* it is making progress towards it.
1740 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1741 self.holder_max_commitment_tx_output.lock().unwrap()
1743 self.counterparty_max_commitment_tx_output.lock().unwrap()
1745 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1746 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1747 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1748 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1751 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1752 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1753 let (value_to_self, value_to_remote) = if self.is_outbound() {
1754 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1756 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1759 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1760 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1761 let (funding_pubkey_a, funding_pubkey_b) = if local {
1762 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1764 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1767 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1768 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1773 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1774 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1779 let num_nondust_htlcs = included_non_dust_htlcs.len();
1781 let channel_parameters =
1782 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1783 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1784 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1791 &mut included_non_dust_htlcs,
1794 let mut htlcs_included = included_non_dust_htlcs;
1795 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1796 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1797 htlcs_included.append(&mut included_dust_htlcs);
1799 // For the stats, trim the values (in msats) to 0 if they fall below the broadcaster's dust limit
1800 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1801 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1809 local_balance_msat: value_to_self_msat as u64,
1810 remote_balance_msat: value_to_remote_msat as u64,
1811 inbound_htlc_preimages,
1812 outbound_htlc_preimages,
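// Illustrative sketch (editor's addition, not upstream LDK code): how the commitment fee and any
// anchor output value are deducted from the funder's balance, mirroring the split computed earlier
// in this method. The helper name is hypothetical; all amounts are in satoshis.
fn example_funder_pays_fee(
value_to_self_sat: i64, value_to_remote_sat: i64, total_fee_sat: i64, anchors_val_sat: i64,
holder_is_funder: bool,
) -> (i64, i64) {
if holder_is_funder {
// The channel funder pays both the commitment fee and the anchor outputs.
(value_to_self_sat - anchors_val_sat - total_fee_sat, value_to_remote_sat)
} else {
(value_to_self_sat, value_to_remote_sat - anchors_val_sat - total_fee_sat)
}
}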
1817 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1818 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1819 /// our counterparty!)
1820 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1821 /// TODO Some magic rust shit to compile-time check this?
1822 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1823 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1824 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1825 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1826 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1828 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1832 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1833 /// will sign and send to our counterparty.
1834 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1835 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1836 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1837 //may see payments to it!
1838 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1839 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1840 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1842 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1845 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1846 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1847 /// Panics if called before accept_channel/InboundV1Channel::new
1848 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1849 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1852 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1853 &self.get_counterparty_pubkeys().funding_pubkey
1856 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1860 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1861 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1862 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1863 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1864 // more dust balance if the feerate increases when we have several HTLCs pending
1865 // which are near the dust limit.
1866 let mut feerate_per_kw = self.feerate_per_kw;
1867 // If there's a pending update fee, use it to ensure we aren't under-estimating
1868 // potential feerate updates coming soon.
1869 if let Some((feerate, _)) = self.pending_update_fee {
1870 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1872 if let Some(feerate) = outbound_feerate_update {
1873 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1875 cmp::max(2530, feerate_per_kw * 1250 / 1000)
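// Illustrative sketch (editor's addition, not upstream LDK code): the dust buffer feerate is the
// larger of 2530 sat/kWU and 125% of the prevailing feerate, matching the expression above.
fn example_dust_buffer_feerate(current_feerate_per_kw: u32) -> u32 {
// E.g. 1_000 sat/kWU buffers up to 2_530, while 10_000 sat/kWU buffers up to 12_500.
core::cmp::max(2530, current_feerate_per_kw * 1250 / 1000)
}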
1878 /// Get forwarding information for the counterparty.
1879 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1880 self.counterparty_forwarding_info.clone()
1883 /// Returns an `HTLCStats` describing the pending inbound HTLCs
1884 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1886 let mut stats = HTLCStats {
1887 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1888 pending_htlcs_value_msat: 0,
1889 on_counterparty_tx_dust_exposure_msat: 0,
1890 on_holder_tx_dust_exposure_msat: 0,
1891 holding_cell_msat: 0,
1892 on_holder_tx_holding_cell_htlcs_count: 0,
1895 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1898 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1899 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1900 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1902 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1903 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1904 for ref htlc in context.pending_inbound_htlcs.iter() {
1905 stats.pending_htlcs_value_msat += htlc.amount_msat;
1906 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1907 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1909 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1910 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
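// Illustrative sketch (editor's addition, not upstream LDK code): classifying a single inbound
// HTLC's dust exposure the same way the loop above does. The thresholds are in satoshis, the HTLC
// amount is in millisatoshis, and the helper name is hypothetical.
fn example_inbound_dust_exposure_msat(
amount_msat: u64, counterparty_dust_limit_timeout_sat: u64, holder_dust_limit_success_sat: u64,
) -> (u64, u64) {
// An HTLC contributes its full value to a transaction's dust exposure if it would be trimmed there.
let on_counterparty_tx = if amount_msat / 1000 < counterparty_dust_limit_timeout_sat { amount_msat } else { 0 };
let on_holder_tx = if amount_msat / 1000 < holder_dust_limit_success_sat { amount_msat } else { 0 };
(on_counterparty_tx, on_holder_tx)
}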
1916 /// Returns an `HTLCStats` describing pending outbound HTLCs, *including* pending adds in our holding cell.
1917 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1919 let mut stats = HTLCStats {
1920 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1921 pending_htlcs_value_msat: 0,
1922 on_counterparty_tx_dust_exposure_msat: 0,
1923 on_holder_tx_dust_exposure_msat: 0,
1924 holding_cell_msat: 0,
1925 on_holder_tx_holding_cell_htlcs_count: 0,
1928 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1931 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1932 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1933 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1935 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1936 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1937 for ref htlc in context.pending_outbound_htlcs.iter() {
1938 stats.pending_htlcs_value_msat += htlc.amount_msat;
1939 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1940 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1942 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1943 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1947 for update in context.holding_cell_htlc_updates.iter() {
1948 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1949 stats.pending_htlcs += 1;
1950 stats.pending_htlcs_value_msat += amount_msat;
1951 stats.holding_cell_msat += amount_msat;
1952 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1953 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1955 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1956 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1958 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1965 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1966 /// Doesn't bother handling the
1967 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1968 /// corner case properly.
1969 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1970 -> AvailableBalances
1971 where F::Target: FeeEstimator
1973 let context = &self;
1974 // Note that we have to handle overflow due to the above case.
1975 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1976 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1978 let mut balance_msat = context.value_to_self_msat;
1979 for ref htlc in context.pending_inbound_htlcs.iter() {
1980 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1981 balance_msat += htlc.amount_msat;
1984 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1986 let outbound_capacity_msat = context.value_to_self_msat
1987 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1989 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1991 let mut available_capacity_msat = outbound_capacity_msat;
1993 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1994 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1998 if context.is_outbound() {
1999 // We should mind channel commit tx fee when computing how much of the available capacity
2000 // can be used in the next htlc. Mirrors the logic in send_htlc.
2002 // The fee depends on whether the amount we will be sending is above dust or not,
2003 // and the answer will in turn change the amount itself, making it a circular dependency.
2005 // This complicates the computation around dust-values, up to the one-htlc-value.
2006 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2007 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2008 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2011 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2012 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2013 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2014 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2015 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2016 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2017 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2020 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2021 // value ends up being below dust, we have this fee available again. In that case,
2022 // match the value to right-below-dust.
2023 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2024 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2025 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2026 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2027 debug_assert!(one_htlc_difference_msat != 0);
2028 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2029 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2030 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2032 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2035 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2036 // sending a new HTLC won't reduce their balance below our reserve threshold.
2037 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2038 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2039 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2042 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2043 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2045 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2046 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2047 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2049 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2050 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2051 // we've selected for them, we can only send dust HTLCs.
2052 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2056 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2058 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2059 // between zero and the remaining dust exposure limit OR above the dust limit.
2060 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2061 // send above the dust limit (as the router can always overpay to meet the dust limit).
2062 let mut remaining_msat_below_dust_exposure_limit = None;
2063 let mut dust_exposure_dust_limit_msat = 0;
2064 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2066 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2067 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2069 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2070 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2071 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2073 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2074 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2075 remaining_msat_below_dust_exposure_limit =
2076 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2077 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2080 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2081 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2082 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2083 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2084 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2085 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2088 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2089 if available_capacity_msat < dust_exposure_dust_limit_msat {
2090 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2092 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2096 available_capacity_msat = cmp::min(available_capacity_msat,
2097 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2099 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2100 available_capacity_msat = 0;
2104 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2105 - context.value_to_self_msat as i64
2106 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2107 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2109 outbound_capacity_msat,
2110 next_outbound_htlc_limit_msat: available_capacity_msat,
2111 next_outbound_htlc_minimum_msat,
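// Illustrative sketch (editor's addition, not upstream LDK code): the basic outbound capacity
// computed near the top of this method, before the commitment-fee, dust-exposure and
// max-in-flight refinements above are applied. The helper name is hypothetical; the reserve is in
// satoshis, everything else in millisatoshis.
fn example_outbound_capacity_msat(
our_balance_msat: u64, pending_outbound_htlcs_msat: u64, counterparty_selected_reserve_sat: u64,
) -> u64 {
our_balance_msat
.saturating_sub(pending_outbound_htlcs_msat)
.saturating_sub(counterparty_selected_reserve_sat * 1000)
}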
2116 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2117 let context = &self;
2118 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2121 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2122 /// number of pending HTLCs that are on track to be in our next commitment tx.
2124 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2125 /// `fee_spike_buffer_htlc` is `Some`.
2127 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2128 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2130 /// Dust HTLCs are excluded.
2131 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2132 let context = &self;
2133 assert!(context.is_outbound());
2135 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2138 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2139 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2141 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2142 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2144 let mut addl_htlcs = 0;
2145 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2147 HTLCInitiator::LocalOffered => {
2148 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2152 HTLCInitiator::RemoteOffered => {
2153 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2159 let mut included_htlcs = 0;
2160 for ref htlc in context.pending_inbound_htlcs.iter() {
2161 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2164 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2165 // transaction including this HTLC if it times out before they RAA.
2166 included_htlcs += 1;
2169 for ref htlc in context.pending_outbound_htlcs.iter() {
2170 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2174 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2175 OutboundHTLCState::Committed => included_htlcs += 1,
2176 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2177 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2178 // transaction won't be generated until they send us their next RAA, which will mean
2179 // dropping any HTLCs in this state.
2184 for htlc in context.holding_cell_htlc_updates.iter() {
2186 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2187 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2192 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2193 // ack we're guaranteed to never include them in commitment txs anymore.
2197 let num_htlcs = included_htlcs + addl_htlcs;
2198 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2199 #[cfg(any(test, fuzzing))]
2202 if fee_spike_buffer_htlc.is_some() {
2203 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2205 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2206 + context.holding_cell_htlc_updates.len();
2207 let commitment_tx_info = CommitmentTxInfoCached {
2209 total_pending_htlcs,
2210 next_holder_htlc_id: match htlc.origin {
2211 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2212 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2214 next_counterparty_htlc_id: match htlc.origin {
2215 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2216 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2218 feerate: context.feerate_per_kw,
2220 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2225 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2226 /// pending HTLCs that are on track to be in their next commitment tx
2228 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2229 /// `fee_spike_buffer_htlc` is `Some`.
2231 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2232 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2234 /// Dust HTLCs are excluded.
2235 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2236 let context = &self;
2237 assert!(!context.is_outbound());
2239 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2242 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2243 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2245 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2246 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2248 let mut addl_htlcs = 0;
2249 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2251 HTLCInitiator::LocalOffered => {
2252 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2256 HTLCInitiator::RemoteOffered => {
2257 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2263 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2264 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2265 // committed outbound HTLCs, see below.
2266 let mut included_htlcs = 0;
2267 for ref htlc in context.pending_inbound_htlcs.iter() {
2268 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2271 included_htlcs += 1;
2274 for ref htlc in context.pending_outbound_htlcs.iter() {
2275 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2278 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2279 // i.e. if they've responded to us with an RAA after announcement.
2281 OutboundHTLCState::Committed => included_htlcs += 1,
2282 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2283 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2288 let num_htlcs = included_htlcs + addl_htlcs;
2289 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2290 #[cfg(any(test, fuzzing))]
2293 if fee_spike_buffer_htlc.is_some() {
2294 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2296 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2297 let commitment_tx_info = CommitmentTxInfoCached {
2299 total_pending_htlcs,
2300 next_holder_htlc_id: match htlc.origin {
2301 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2302 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2304 next_counterparty_htlc_id: match htlc.origin {
2305 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2306 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2308 feerate: context.feerate_per_kw,
2310 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2315 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2316 where F: Fn() -> Option<O> {
2317 match self.channel_state {
2318 ChannelState::FundingNegotiated => f(),
2319 ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2328 /// Returns the transaction if there is a pending funding transaction that is yet to be
2330 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2331 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2334 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2336 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2337 self.if_unbroadcasted_funding(||
2338 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2342 /// Returns whether the channel is funded in a batch.
2343 pub fn is_batch_funding(&self) -> bool {
2344 self.is_batch_funding.is_some()
2347 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2349 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2350 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2353 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2354 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2355 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2356 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2357 /// immediately (others we will have to allow to time out).
2358 pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2359 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2360 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2361 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2362 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2363 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2365 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2366 // return them to fail the payment.
2367 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2368 let counterparty_node_id = self.get_counterparty_node_id();
2369 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2371 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2372 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2377 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2378 // If we haven't yet exchanged funding signatures (i.e. channel_state < AwaitingChannelReady),
2379 // returning a channel monitor update here would imply a channel monitor update before
2380 // we even registered the channel monitor to begin with, which is invalid.
2381 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2382 // funding transaction, don't return a funding txo (which prevents providing the
2383 // monitor update to the user, even if we return one).
2384 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2385 let generate_monitor_update = match self.channel_state {
2386 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2389 if generate_monitor_update {
2390 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2391 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2392 update_id: self.latest_monitor_update_id,
2393 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2397 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2399 self.channel_state = ChannelState::ShutdownComplete;
2400 self.update_time_counter += 1;
2403 dropped_outbound_htlcs,
2404 unbroadcasted_batch_funding_txid,
2405 channel_id: self.channel_id,
2406 counterparty_node_id: self.counterparty_node_id,
2410 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2411 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
2412 let counterparty_keys = self.build_remote_transaction_keys();
2413 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2414 let signature = match &self.holder_signer {
2415 // TODO (taproot|arik): move match into calling method for Taproot
2416 ChannelSignerType::Ecdsa(ecdsa) => {
2417 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2418 .map(|(sig, _)| sig).ok()?
2420 // TODO (taproot|arik)
2425 if self.signer_pending_funding {
2426 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
2427 self.signer_pending_funding = false;
2430 Some(msgs::FundingCreated {
2431 temporary_channel_id: self.temporary_channel_id.unwrap(),
2432 funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
2433 funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
2436 partial_signature_with_nonce: None,
2438 next_local_nonce: None,
2442 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2443 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2444 let counterparty_keys = self.build_remote_transaction_keys();
2445 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2447 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2448 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2449 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2450 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2452 match &self.holder_signer {
2453 // TODO (arik): move match into calling method for Taproot
2454 ChannelSignerType::Ecdsa(ecdsa) => {
2455 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2456 .map(|(signature, _)| msgs::FundingSigned {
2457 channel_id: self.channel_id(),
2460 partial_signature_with_nonce: None,
2464 if funding_signed.is_none() {
2465 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2466 self.signer_pending_funding = true;
2467 } else if self.signer_pending_funding {
2468 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2469 self.signer_pending_funding = false;
2472 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2473 (counterparty_initial_commitment_tx, funding_signed)
2475 // TODO (taproot|arik)
2482 // Internal utility functions for channels
2484 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2485 /// `channel_value_satoshis` in msat, set through
2486 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2488 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2490 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2491 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2492 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2494 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2497 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2499 channel_value_satoshis * 10 * configured_percent
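// Illustrative sketch (editor's addition, not upstream LDK code): the `* 10` above is the
// sat-to-msat conversion (x1000) combined with the percentage divisor (/100), i.e.
// value_sat * 1000 * percent / 100. The helper name and example numbers are hypothetical.
fn example_max_in_flight_msat(channel_value_satoshis: u64, configured_percent: u64) -> u64 {
// E.g. a 1_000_000 sat channel at 10% allows 100_000_000 msat (100_000 sat) in flight.
channel_value_satoshis * 10 * configured_percent
}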
2502 /// Returns a minimum channel reserve value the remote needs to maintain,
2503 /// required by us according to the configured or default
2504 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2506 /// Guaranteed to return a value no larger than channel_value_satoshis
2508 /// This is used for both outbound and inbound channels and has a lower bound
2509 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2510 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2511 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2512 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
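// Illustrative sketch (editor's addition, not upstream LDK code): the reserve is a
// proportional-millionths share of the channel value, floored at a configured minimum and capped
// at the full channel value, matching the expression above. Names and figures are hypothetical.
fn example_reserve_sat(channel_value_satoshis: u64, proportional_millionths: u64, min_reserve_sat: u64) -> u64 {
// E.g. 1_000_000 sat at 10_000 millionths (1%) gives a 10_000 sat reserve, if above the minimum.
let calculated = channel_value_satoshis.saturating_mul(proportional_millionths) / 1_000_000;
core::cmp::min(channel_value_satoshis, core::cmp::max(calculated, min_reserve_sat))
}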
2515 /// This is for legacy reasons, present for forward-compatibility.
2516 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2517 /// from storage. Hence, we use this function to not persist default values of
2518 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2519 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2520 let (q, _) = channel_value_satoshis.overflowing_div(100);
2521 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
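// Illustrative sketch (editor's addition, not upstream LDK code): the legacy default reserve is
// 1% of the channel value, floored at 1000 sat and capped at the channel value itself.
fn example_legacy_default_reserve_sat(channel_value_satoshis: u64) -> u64 {
// E.g. 50_000 sat -> 1000 sat (the floor applies); 1_000_000 sat -> 10_000 sat.
core::cmp::min(channel_value_satoshis, core::cmp::max(channel_value_satoshis / 100, 1000))
}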
2524 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2525 // Note that num_htlcs should not include dust HTLCs.
2527 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2528 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2531 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2532 // Note that num_htlcs should not include dust HTLCs.
2533 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2534 // Note that we need to divide before multiplying to round properly,
2535 // since the lowest denomination of bitcoin on-chain is the satoshi.
2536 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
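// Illustrative sketch (editor's addition, not upstream LDK code): dividing by 1000 before
// multiplying back by 1000, as above, rounds the fee down to a whole satoshi, so the msat result
// is always a multiple of 1000. The weight and feerate figures below are hypothetical.
fn example_fee_rounding(weight: u64, feerate_per_kw: u64) -> (u64, u64) {
let fee_sat = weight * feerate_per_kw / 1000; // e.g. 724 * 2513 / 1000 = 1819 (truncated)
let fee_msat = fee_sat * 1000; // 1_819_000 msat rather than 1_819_412 msat
(fee_sat, fee_msat)
}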
2539 // Holder designates channel data owned for the benefit of the user client.
2540 // Counterparty designates channel data owned by the other channel participant entity.
2541 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2542 pub context: ChannelContext<SP>,
2545 #[cfg(any(test, fuzzing))]
2546 struct CommitmentTxInfoCached {
2548 total_pending_htlcs: usize,
2549 next_holder_htlc_id: u64,
2550 next_counterparty_htlc_id: u64,
2554 impl<SP: Deref> Channel<SP> where
2555 SP::Target: SignerProvider,
2556 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2558 fn check_remote_fee<F: Deref, L: Deref>(
2559 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2560 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2561 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2563 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2564 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2566 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2568 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2569 if feerate_per_kw < lower_limit {
2570 if let Some(cur_feerate) = cur_feerate_per_kw {
2571 if feerate_per_kw > cur_feerate {
2573 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2574 cur_feerate, feerate_per_kw);
2578 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2584 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2585 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2586 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2587 // outside of those situations will fail.
2588 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2592 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2597 1 + // script length (0)
2601 )*4 + // * 4 for non-witness parts
2602 2 + // witness marker and flag
2603 1 + // witness element count
2604 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2605 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2606 2*(1 + 71); // two signatures + sighash type flags
2607 if let Some(spk) = a_scriptpubkey {
2608 ret += ((8+1) + // output values and script length
2609 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2611 if let Some(spk) = b_scriptpubkey {
2612 ret += ((8+1) + // output values and script length
2613 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
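// Worked example (illustrative): a standard P2WPKH shutdown script is 22 bytes, so each output
// that is present adds (8 + 1 + 22) * 4 = 124 weight units to the total above.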
2619 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2620 assert!(self.context.pending_inbound_htlcs.is_empty());
2621 assert!(self.context.pending_outbound_htlcs.is_empty());
2622 assert!(self.context.pending_update_fee.is_none());
2624 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2625 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2626 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2628 if value_to_holder < 0 {
2629 assert!(self.context.is_outbound());
2630 total_fee_satoshis += (-value_to_holder) as u64;
2631 } else if value_to_counterparty < 0 {
2632 assert!(!self.context.is_outbound());
2633 total_fee_satoshis += (-value_to_counterparty) as u64;
2636 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2637 value_to_counterparty = 0;
2640 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2641 value_to_holder = 0;
2644 assert!(self.context.shutdown_scriptpubkey.is_some());
2645 let holder_shutdown_script = self.get_closing_scriptpubkey();
2646 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2647 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2649 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2650 (closing_transaction, total_fee_satoshis)
2653 fn funding_outpoint(&self) -> OutPoint {
2654 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2657 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`] entirely.
2660 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2661 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2663 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is disconnected).
2665 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2666 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2667 where L::Target: Logger {
2668 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2669 // (see equivalent if condition there).
2670 assert!(self.context.channel_state.should_force_holding_cell());
2671 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2672 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2673 self.context.latest_monitor_update_id = mon_update_id;
2674 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2675 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2679 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2680 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2681 // caller thought we could have something claimed (because we wouldn't have accepted an
2682 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2684 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2685 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2688 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2689 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2690 // these, but for now we just have to treat them as normal.
2692 let mut pending_idx = core::usize::MAX;
2693 let mut htlc_value_msat = 0;
2694 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2695 if htlc.htlc_id == htlc_id_arg {
2696 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2697 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2698 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2700 InboundHTLCState::Committed => {},
2701 InboundHTLCState::LocalRemoved(ref reason) => {
2702 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2704 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2705 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2707 return UpdateFulfillFetch::DuplicateClaim {};
2710 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2711 // Don't return in release mode here so that we can update channel_monitor
2715 htlc_value_msat = htlc.amount_msat;
2719 if pending_idx == core::usize::MAX {
2720 #[cfg(any(test, fuzzing))]
2721 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2722 // this is simply a duplicate claim, not previously failed and we lost funds.
2723 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2724 return UpdateFulfillFetch::DuplicateClaim {};
2727 // Now update local state:
2729 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2730 // can claim it even if the channel hits the chain before we see their next commitment.
2731 self.context.latest_monitor_update_id += 1;
2732 let monitor_update = ChannelMonitorUpdate {
2733 update_id: self.context.latest_monitor_update_id,
2734 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2735 payment_preimage: payment_preimage_arg.clone(),
2739 if self.context.channel_state.should_force_holding_cell() {
2740 // Note that this condition is the same as the assertion in
2741 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2742 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2743 // do not get into this branch.
2744 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2745 match pending_update {
2746 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2747 if htlc_id_arg == htlc_id {
2748 // Make sure we don't leave latest_monitor_update_id incremented here:
2749 self.context.latest_monitor_update_id -= 1;
2750 #[cfg(any(test, fuzzing))]
2751 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2752 return UpdateFulfillFetch::DuplicateClaim {};
2755 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2756 if htlc_id_arg == htlc_id {
2757 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2758 // TODO: We may actually be able to switch to a fulfill here, though it's
2759 // rare enough it may not be worth the complexity burden.
2760 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2761 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2767 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2768 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2769 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2771 #[cfg(any(test, fuzzing))]
2772 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2773 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2775 #[cfg(any(test, fuzzing))]
2776 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2779 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2780 if let InboundHTLCState::Committed = htlc.state {
2782 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2783 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2785 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2786 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2789 UpdateFulfillFetch::NewClaim {
2792 msg: Some(msgs::UpdateFulfillHTLC {
2793 channel_id: self.context.channel_id(),
2794 htlc_id: htlc_id_arg,
2795 payment_preimage: payment_preimage_arg,
2800 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2801 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2802 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2803 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2804 // Even if we aren't supposed to let new monitor updates with commitment state
2805 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2806 // matter what. Sadly, to push a new monitor update which flies before others
2807 // already queued, we have to insert it into the pending queue and update the
2808 // update_ids of all the following monitors.
2809 if release_cs_monitor && msg.is_some() {
2810 let mut additional_update = self.build_commitment_no_status_check(logger);
2811 // build_commitment_no_status_check may bump latest_monitor_id but we want them
2812 // to be strictly increasing by one, so decrement it here.
2813 self.context.latest_monitor_update_id = monitor_update.update_id;
2814 monitor_update.updates.append(&mut additional_update.updates);
2816 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2817 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2818 monitor_update.update_id = new_mon_id;
2819 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2820 held_update.update.update_id += 1;
2823 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2824 let update = self.build_commitment_no_status_check(logger);
2825 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2831 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2832 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2834 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2838 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2839 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2840 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2841 /// before we fail backwards.
2843 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2844 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2845 /// [`ChannelError::Ignore`].
2846 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2847 -> Result<(), ChannelError> where L::Target: Logger {
2848 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2849 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2852 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2853 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2854 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2855 /// before we fail backwards.
2857 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2858 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2859 /// [`ChannelError::Ignore`].
2860 fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2861 -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2862 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2863 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2866 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2867 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2868 // these, but for now we just have to treat them as normal.
2870 let mut pending_idx = core::usize::MAX;
2871 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2872 if htlc.htlc_id == htlc_id_arg {
2874 InboundHTLCState::Committed => {},
2875 InboundHTLCState::LocalRemoved(ref reason) => {
2876 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2878 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2883 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2884 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2890 if pending_idx == core::usize::MAX {
2891 #[cfg(any(test, fuzzing))]
2892 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2893 // is simply a duplicate fail, not previously failed and we failed-back too early.
2894 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2898 if self.context.channel_state.should_force_holding_cell() {
2899 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2900 force_holding_cell = true;
2903 // Now update local state:
2904 if force_holding_cell {
2905 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2906 match pending_update {
2907 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2908 if htlc_id_arg == htlc_id {
2909 #[cfg(any(test, fuzzing))]
2910 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2914 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2915 if htlc_id_arg == htlc_id {
2916 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2917 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2923 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2924 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2925 htlc_id: htlc_id_arg,
2931 log_trace!(logger, "Failing HTLC ID {} back with an update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2933 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2934 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2937 Ok(Some(msgs::UpdateFailHTLC {
2938 channel_id: self.context.channel_id(),
2939 htlc_id: htlc_id_arg,
2944 // Message handlers:
2946 /// Handles a funding_signed message from the remote end.
2947 /// If this call is successful, broadcast the funding transaction (and not before!)
2948 pub fn funding_signed<L: Deref>(
2949 &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
2950 ) -> Result<ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>, ChannelError>
2954 if !self.context.is_outbound() {
2955 return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
2957 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
2958 return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned()));
2960 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
2961 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
2962 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
2963 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
2966 let funding_script = self.context.get_funding_redeemscript();
2968 let counterparty_keys = self.context.build_remote_transaction_keys();
2969 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
2970 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2971 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2973 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2974 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2976 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
2977 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
2979 let trusted_tx = initial_commitment_tx.trust();
2980 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
2981 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
2982 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
2983 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
2984 return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
2988 let holder_commitment_tx = HolderCommitmentTransaction::new(
2989 initial_commitment_tx,
2992 &self.context.get_holder_pubkeys().funding_pubkey,
2993 self.context.counterparty_funding_pubkey()
2996 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new())
2997 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3000 let funding_redeemscript = self.context.get_funding_redeemscript();
3001 let funding_txo = self.context.get_funding_txo().unwrap();
3002 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
3003 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
3004 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
3005 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
3006 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
3007 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
3008 shutdown_script, self.context.get_holder_selected_contest_delay(),
3009 &self.context.destination_script, (funding_txo, funding_txo_script),
3010 &self.context.channel_transaction_parameters,
3011 funding_redeemscript.clone(), self.context.channel_value_satoshis,
3013 holder_commitment_tx, best_block, self.context.counterparty_node_id);
3014 channel_monitor.provide_initial_counterparty_commitment_tx(
3015 counterparty_initial_bitcoin_tx.txid, Vec::new(),
3016 self.context.cur_counterparty_commitment_transaction_number,
3017 self.context.counterparty_cur_commitment_point.unwrap(),
3018 counterparty_initial_commitment_tx.feerate_per_kw(),
3019 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
3020 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
3022 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail an update!
3023 if self.context.is_batch_funding() {
3024 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
3026 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
3028 self.context.cur_holder_commitment_transaction_number -= 1;
3029 self.context.cur_counterparty_commitment_transaction_number -= 1;
3031 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
3033 let need_channel_ready = self.check_get_channel_ready(0).is_some();
3034 self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
3038 /// Updates the state of the channel to indicate that all channels in the batch have received
3039 /// funding_signed and persisted their monitors.
3040 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3041 /// treated as a non-batch channel going forward.
3042 pub fn set_batch_ready(&mut self) {
3043 self.context.is_batch_funding = None;
3044 self.context.channel_state.clear_waiting_for_batch();
3047 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3048 /// and the channel is now usable (and public), this may generate an announcement_signatures to reply with.
3050 pub fn channel_ready<NS: Deref, L: Deref>(
3051 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3052 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3053 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3055 NS::Target: NodeSigner,
3058 if self.context.channel_state.is_peer_disconnected() {
3059 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3060 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3063 if let Some(scid_alias) = msg.short_channel_id_alias {
3064 if Some(scid_alias) != self.context.short_channel_id {
3065 // The scid alias provided can be used to route payments *from* our counterparty,
3066 // i.e. can be used for inbound payments and provided in invoices, but is not used
3067 // when routing outbound payments.
3068 self.context.latest_inbound_scid_alias = Some(scid_alias);
3072 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3073 // batch, but we can receive channel_ready messages.
3074 let mut check_reconnection = false;
3075 match &self.context.channel_state {
3076 ChannelState::AwaitingChannelReady(flags) => {
3077 let flags = *flags & !FundedStateFlags::ALL;
3078 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3079 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3080 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3081 check_reconnection = true;
3082 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3083 self.context.channel_state.set_their_channel_ready();
3084 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3085 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3086 self.context.update_time_counter += 1;
3088 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3089 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3092 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3093 ChannelState::ChannelReady(_) => check_reconnection = true,
3094 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3096 if check_reconnection {
3097 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3098 // required, or they're sending a fresh SCID alias.
3099 let expected_point =
3100 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3101 // If they haven't ever sent an updated point, the point they send should match
3103 self.context.counterparty_cur_commitment_point
3104 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3105 // If we've advanced the commitment number once, the second commitment point is
3106 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3107 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3108 self.context.counterparty_prev_commitment_point
3110 // If they have sent updated points, channel_ready is always supposed to match
3111 // their "first" point, which we re-derive here.
3112 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3113 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3114 ).expect("We already advanced, so previous secret keys should have been validated already")))
3116 if expected_point != Some(msg.next_per_commitment_point) {
3117 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3122 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3123 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3125 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3127 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3130 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3131 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3132 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3133 ) -> Result<(), ChannelError>
3134 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3135 FE::Target: FeeEstimator, L::Target: Logger,
3137 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3138 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3140 // We can't accept HTLCs sent after we've sent a shutdown.
3141 if self.context.channel_state.is_local_shutdown_sent() {
3142 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3144 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3145 if self.context.channel_state.is_remote_shutdown_sent() {
3146 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3148 if self.context.channel_state.is_peer_disconnected() {
3149 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3151 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3152 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3154 if msg.amount_msat == 0 {
3155 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3157 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3158 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3161 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3162 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3163 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3164 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3166 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3167 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3170 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3171 // the reserve_satoshis we told them to always have as direct payment so that they lose
3172 // something if we punish them for broadcasting an old state).
3173 // Note that we don't really care about having a small/no to_remote output in our local
3174 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3175 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3176 // present in the next commitment transaction we send them (at least for fulfilled ones,
3177 // failed ones won't modify value_to_self).
3178 // Note that we will send HTLCs which another instance of rust-lightning would think
3179 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3180 // Channel state once they will not be present in the next received commitment
3182 let mut removed_outbound_total_msat = 0;
3183 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3184 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3185 removed_outbound_total_msat += htlc.amount_msat;
3186 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3187 removed_outbound_total_msat += htlc.amount_msat;
3191 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3192 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3195 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3196 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3197 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
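// Worked example (illustrative, assuming the usual non-anchor weights of 663 for HTLC-timeout and
// 703 for HTLC-success transactions): at a dust buffer feerate of 2530 sat/kW this gives
// 2530 * 663 / 1000 = 1677 sats and 2530 * 703 / 1000 = 1778 sats, to which the respective
// dust limits are added below.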
3199 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3200 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3201 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3202 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3203 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3204 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3205 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3209 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3210 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3211 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3212 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3213 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3214 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3215 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3219 let pending_value_to_self_msat =
3220 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3221 let pending_remote_value_msat =
3222 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3223 if pending_remote_value_msat < msg.amount_msat {
3224 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3227 // Check that the remote can afford to pay for this HTLC on-chain at the current
3228 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3230 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3231 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3232 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3234 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3235 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3239 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3240 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3242 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3243 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3247 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3248 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3252 if !self.context.is_outbound() {
3253 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3254 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3255 // side, only on the sender's. Note that with anchor outputs we are no longer as
3256 // sensitive to fee spikes, so we need to account for them less (the multiplier below is skipped for anchor channels).
3257 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3258 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3259 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3260 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
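// Illustrative note (assuming `FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE` is 2): on non-anchor
// channels the projected commitment fee, already computed with one extra buffer HTLC via the
// `Some(())` above, is doubled here, so the check below effectively requires the remote to keep
// roughly twice the projected fee on top of their reserve before we will forward this HTLC
// rather than failing it back.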
3262 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3263 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3264 // the HTLC, i.e. its status is already set to failing.
3265 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3266 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3269 // Check that they won't violate our local required channel reserve by adding this HTLC.
3270 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3271 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3272 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3273 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3276 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3277 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3279 if msg.cltv_expiry >= 500000000 {
3280 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3283 if self.context.channel_state.is_local_shutdown_sent() {
3284 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3285 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3289 // Now update local state:
3290 self.context.next_counterparty_htlc_id += 1;
3291 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3292 htlc_id: msg.htlc_id,
3293 amount_msat: msg.amount_msat,
3294 payment_hash: msg.payment_hash,
3295 cltv_expiry: msg.cltv_expiry,
3296 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3301 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3303 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3304 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3305 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3306 if htlc.htlc_id == htlc_id {
3307 let outcome = match check_preimage {
3308 None => fail_reason.into(),
3309 Some(payment_preimage) => {
3310 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3311 if payment_hash != htlc.payment_hash {
3312 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3314 OutboundHTLCOutcome::Success(Some(payment_preimage))
3318 OutboundHTLCState::LocalAnnounced(_) =>
3319 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3320 OutboundHTLCState::Committed => {
3321 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3323 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3324 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3329 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3332 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3333 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3334 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3336 if self.context.channel_state.is_peer_disconnected() {
3337 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3340 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3343 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3344 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3345 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3347 if self.context.channel_state.is_peer_disconnected() {
3348 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3351 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3355 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3356 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3357 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3359 if self.context.channel_state.is_peer_disconnected() {
3360 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3363 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3367 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3368 where L::Target: Logger
3370 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3371 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3373 if self.context.channel_state.is_peer_disconnected() {
3374 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3376 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3377 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3380 let funding_script = self.context.get_funding_redeemscript();
3382 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3384 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3385 let commitment_txid = {
3386 let trusted_tx = commitment_stats.tx.trust();
3387 let bitcoin_tx = trusted_tx.built_transaction();
3388 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3390 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3391 log_bytes!(msg.signature.serialize_compact()[..]),
3392 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3393 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3394 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3395 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3399 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3401 // If our counterparty updated the channel fee in this commitment transaction, check that
3402 // they can actually afford the new fee now.
3403 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3404 update_state == FeeUpdateState::RemoteAnnounced
3407 debug_assert!(!self.context.is_outbound());
3408 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3409 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3410 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3413 #[cfg(any(test, fuzzing))]
3415 if self.context.is_outbound() {
3416 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3417 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3418 if let Some(info) = projected_commit_tx_info {
3419 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3420 + self.context.holding_cell_htlc_updates.len();
3421 if info.total_pending_htlcs == total_pending_htlcs
3422 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3423 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3424 && info.feerate == self.context.feerate_per_kw {
3425 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3431 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3432 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3435 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3436 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3437 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3438 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3439 // backwards compatibility, we never use it in production. To provide test coverage, here,
3440 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3441 #[allow(unused_assignments, unused_mut)]
3442 let mut separate_nondust_htlc_sources = false;
3443 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3444 use core::hash::{BuildHasher, Hasher};
3445 // Get a random value using the only std API to do so - the DefaultHasher
3446 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3447 separate_nondust_htlc_sources = rand_val % 2 == 0;
3450 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3451 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3452 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3453 if let Some(_) = htlc.transaction_output_index {
3454 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3455 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3456 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3458 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3459 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3460 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3461 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3462 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3463 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3464 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3465 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3467 if !separate_nondust_htlc_sources {
3468 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3471 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3473 if separate_nondust_htlc_sources {
3474 if let Some(source) = source_opt.take() {
3475 nondust_htlc_sources.push(source);
3478 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3481 let holder_commitment_tx = HolderCommitmentTransaction::new(
3482 commitment_stats.tx,
3484 msg.htlc_signatures.clone(),
3485 &self.context.get_holder_pubkeys().funding_pubkey,
3486 self.context.counterparty_funding_pubkey()
3489 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3490 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3492 // Update state now that we've passed all the can-fail calls...
3493 let mut need_commitment = false;
3494 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3495 if *update_state == FeeUpdateState::RemoteAnnounced {
3496 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3497 need_commitment = true;
3501 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3502 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3503 Some(forward_info.clone())
3505 if let Some(forward_info) = new_forward {
3506 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3507 &htlc.payment_hash, &self.context.channel_id);
3508 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3509 need_commitment = true;
3512 let mut claimed_htlcs = Vec::new();
3513 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3514 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3515 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3516 &htlc.payment_hash, &self.context.channel_id);
3517 // Grab the preimage, if it exists, instead of cloning
3518 let mut reason = OutboundHTLCOutcome::Success(None);
3519 mem::swap(outcome, &mut reason);
3520 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3521 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3522 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3523 // have a `Success(None)` reason. In this case we could forget some HTLC
3524 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3525 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3527 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3529 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3530 need_commitment = true;
3534 self.context.latest_monitor_update_id += 1;
3535 let mut monitor_update = ChannelMonitorUpdate {
3536 update_id: self.context.latest_monitor_update_id,
3537 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3538 commitment_tx: holder_commitment_tx,
3539 htlc_outputs: htlcs_and_sigs,
3541 nondust_htlc_sources,
3545 self.context.cur_holder_commitment_transaction_number -= 1;
3546 self.context.expecting_peer_commitment_signed = false;
3547 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3548 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3549 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3551 if self.context.channel_state.is_monitor_update_in_progress() {
3552 // In case we initially failed monitor updating without requiring a response, we need
3553 // to make sure the RAA gets sent first.
3554 self.context.monitor_pending_revoke_and_ack = true;
3555 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3556 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3557 // the corresponding HTLC status updates so that
3558 // get_last_commitment_update_for_send includes the right HTLCs.
3559 self.context.monitor_pending_commitment_signed = true;
3560 let mut additional_update = self.build_commitment_no_status_check(logger);
3561 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3562 // strictly increasing by one, so decrement it here.
3563 self.context.latest_monitor_update_id = monitor_update.update_id;
3564 monitor_update.updates.append(&mut additional_update.updates);
3566 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3567 &self.context.channel_id);
3568 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3571 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3572 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3573 // we'll send one right away when we get the revoke_and_ack when we
3574 // free_holding_cell_htlcs().
3575 let mut additional_update = self.build_commitment_no_status_check(logger);
3576 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3577 // strictly increasing by one, so decrement it here.
3578 self.context.latest_monitor_update_id = monitor_update.update_id;
3579 monitor_update.updates.append(&mut additional_update.updates);
3583 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3584 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3585 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3586 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3589 /// Public version of the below, checking relevant preconditions first.
3590 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3591 /// returns `(None, Vec::new())`.
3592 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3593 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3594 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3595 where F::Target: FeeEstimator, L::Target: Logger
3597 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3598 self.free_holding_cell_htlcs(fee_estimator, logger)
3599 } else { (None, Vec::new()) }
3602 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3603 /// for our counterparty.
3604 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3605 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3606 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3607 where F::Target: FeeEstimator, L::Target: Logger
3609 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3610 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3611 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3612 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3614 let mut monitor_update = ChannelMonitorUpdate {
3615 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3616 updates: Vec::new(),
3619 let mut htlc_updates = Vec::new();
3620 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3621 let mut update_add_count = 0;
3622 let mut update_fulfill_count = 0;
3623 let mut update_fail_count = 0;
3624 let mut htlcs_to_fail = Vec::new();
3625 for htlc_update in htlc_updates.drain(..) {
3626 // Note that this *can* fail, though it should be due to rather-rare conditions on
3627 // fee races with adding too many outputs which push our total payments just over
3628 // the limit. In case it's less rare than I anticipate, we may want to revisit
3629 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3630 // to rebalance channels.
3631 match &htlc_update {
3632 &HTLCUpdateAwaitingACK::AddHTLC {
3633 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3634 skimmed_fee_msat, blinding_point, ..
3636 match self.send_htlc(
3637 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3638 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3640 Ok(_) => update_add_count += 1,
3643 ChannelError::Ignore(ref msg) => {
3644 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3645 // If we fail to send here, then this HTLC should
3646 // be failed backwards. Failing to send here
3647 // indicates that this HTLC may keep being put back
3648 // into the holding cell without ever being
3649 // successfully forwarded/failed/fulfilled, causing
3650 // our counterparty to eventually close on us.
3651 htlcs_to_fail.push((source.clone(), *payment_hash));
3654 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3660 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3661 // If an HTLC claim was previously added to the holding cell (via
3662 // `get_update_fulfill_htlc`), then generating the claim message itself must
3663 // not fail - any in between attempts to claim the HTLC will have resulted
3664 // in it hitting the holding cell again and we cannot change the state of a
3665 // holding cell HTLC from fulfill to anything else.
3666 let mut additional_monitor_update =
3667 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3668 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3669 { monitor_update } else { unreachable!() };
3670 update_fulfill_count += 1;
3671 monitor_update.updates.append(&mut additional_monitor_update.updates);
3673 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3674 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3675 Ok(update_fail_msg_option) => {
3676 // If an HTLC failure was previously added to the holding cell (via
3677 // `queue_fail_htlc`) then generating the fail message itself must
3678 // not fail - we should never end up in a state where we double-fail
3679 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3680 // for a full revocation before failing.
3681 debug_assert!(update_fail_msg_option.is_some());
3682 update_fail_count += 1;
3685 if let ChannelError::Ignore(_) = e {}
3687 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3694 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3695 return (None, htlcs_to_fail);
3697 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3698 self.send_update_fee(feerate, false, fee_estimator, logger)
3703 let mut additional_update = self.build_commitment_no_status_check(logger);
3704 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3705 // but we want them to be strictly increasing by one, so reset it here.
3706 self.context.latest_monitor_update_id = monitor_update.update_id;
3707 monitor_update.updates.append(&mut additional_update.updates);
3709 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3710 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3711 update_add_count, update_fulfill_count, update_fail_count);
3713 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3714 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3720 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3721 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3722 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3723 /// generating an appropriate error *after* the channel state has been updated based on the
3724 /// revoke_and_ack message.
3725 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3726 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3727 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3728 where F::Target: FeeEstimator, L::Target: Logger,
3730 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3731 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3733 if self.context.channel_state.is_peer_disconnected() {
3734 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3736 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3737 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3740 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3742 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3743 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3744 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3748 if !self.context.channel_state.is_awaiting_remote_revoke() {
3749 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3750 // haven't given them a new commitment transaction to broadcast). We should probably
3751 // take advantage of this by updating our channel monitor, sending them an error, and
3752 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3753 // lot of work, and there's some chance this is all a misunderstanding anyway.
3754 // We have to do *something*, though, since our signer may get mad at us for otherwise
3755 // jumping a remote commitment number, so best to just force-close and move on.
3756 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3759 #[cfg(any(test, fuzzing))]
3761 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3762 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3765 match &self.context.holder_signer {
3766 ChannelSignerType::Ecdsa(ecdsa) => {
3767 ecdsa.validate_counterparty_revocation(
3768 self.context.cur_counterparty_commitment_transaction_number + 1,
3770 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3772 // TODO (taproot|arik)
3777 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3778 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3779 self.context.latest_monitor_update_id += 1;
3780 let mut monitor_update = ChannelMonitorUpdate {
3781 update_id: self.context.latest_monitor_update_id,
3782 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3783 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3784 secret: msg.per_commitment_secret,
3788 // Update state now that we've passed all the can-fail calls...
3789 // (note that we may still fail to generate the new commitment_signed message, but that's
3790 // OK, we step the channel here and *then* if the new generation fails we can fail the
3791 // channel based on that, but stepping stuff here should be safe either way.)
3792 self.context.channel_state.clear_awaiting_remote_revoke();
3793 self.context.sent_message_awaiting_response = None;
3794 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3795 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3796 self.context.cur_counterparty_commitment_transaction_number -= 1;
3798 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3799 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3802 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3803 let mut to_forward_infos = Vec::new();
3804 let mut revoked_htlcs = Vec::new();
3805 let mut finalized_claimed_htlcs = Vec::new();
3806 let mut update_fail_htlcs = Vec::new();
3807 let mut update_fail_malformed_htlcs = Vec::new();
3808 let mut require_commitment = false;
3809 let mut value_to_self_msat_diff: i64 = 0;
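// value_to_self_msat_diff tracks the net msat change to our balance from HTLCs fully
// resolved by this revoke_and_ack: fulfilled inbound HTLCs increase it, fulfilled outbound
// HTLCs decrease it. It is signed because we may net pay out, and is applied to
// value_to_self_msat once the passes below complete.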
3812 // Take references explicitly so that we can hold multiple references to self.context.
3813 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3814 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3815 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3817 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3818 pending_inbound_htlcs.retain(|htlc| {
3819 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3820 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3821 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3822 value_to_self_msat_diff += htlc.amount_msat as i64;
3824 *expecting_peer_commitment_signed = true;
3828 pending_outbound_htlcs.retain(|htlc| {
3829 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3830 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3831 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3832 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3834 finalized_claimed_htlcs.push(htlc.source.clone());
3835 // They fulfilled, so we sent them money
3836 value_to_self_msat_diff -= htlc.amount_msat as i64;
3841 for htlc in pending_inbound_htlcs.iter_mut() {
3842 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3844 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3848 let mut state = InboundHTLCState::Committed;
3849 mem::swap(&mut state, &mut htlc.state);
3851 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3852 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3853 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3854 require_commitment = true;
3855 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3856 match forward_info {
3857 PendingHTLCStatus::Fail(fail_msg) => {
3858 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3859 require_commitment = true;
3861 HTLCFailureMsg::Relay(msg) => {
3862 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3863 update_fail_htlcs.push(msg)
3865 HTLCFailureMsg::Malformed(msg) => {
3866 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3867 update_fail_malformed_htlcs.push(msg)
3871 PendingHTLCStatus::Forward(forward_info) => {
3872 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3873 to_forward_infos.push((forward_info, htlc.htlc_id));
3874 htlc.state = InboundHTLCState::Committed;
3880 for htlc in pending_outbound_htlcs.iter_mut() {
3881 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3882 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3883 htlc.state = OutboundHTLCState::Committed;
3884 *expecting_peer_commitment_signed = true;
3886 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3887 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3888 // Grab the preimage, if it exists, instead of cloning
3889 let mut reason = OutboundHTLCOutcome::Success(None);
3890 mem::swap(outcome, &mut reason);
3891 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3892 require_commitment = true;
3896 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3898 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3899 match update_state {
3900 FeeUpdateState::Outbound => {
3901 debug_assert!(self.context.is_outbound());
3902 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3903 self.context.feerate_per_kw = feerate;
3904 self.context.pending_update_fee = None;
3905 self.context.expecting_peer_commitment_signed = true;
3907 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3908 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3909 debug_assert!(!self.context.is_outbound());
3910 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3911 require_commitment = true;
3912 self.context.feerate_per_kw = feerate;
3913 self.context.pending_update_fee = None;
3918 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3919 let release_state_str =
3920 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
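// return_with_htlcs_to_fail either stashes the monitor update in blocked_monitor_updates
// (returning None to the caller) or hands it back for immediate application, depending on
// the release_monitor flag computed above.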
3921 macro_rules! return_with_htlcs_to_fail {
3922 ($htlcs_to_fail: expr) => {
3923 if !release_monitor {
3924 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3925 update: monitor_update,
3927 return Ok(($htlcs_to_fail, None));
3929 return Ok(($htlcs_to_fail, Some(monitor_update)));
3934 if self.context.channel_state.is_monitor_update_in_progress() {
3935 // We can't actually generate a new commitment transaction (incl by freeing holding
3936 // cells) while we can't update the monitor, so we just return what we have.
3937 if require_commitment {
3938 self.context.monitor_pending_commitment_signed = true;
3939 // When the monitor updating is restored we'll call get_last_commitment_update_for_send(),
3940 // which does not update state, but we're definitely now awaiting a remote revoke before we
3941 // can step forward any more, so we set monitor_pending_commitment_signed above.
3943 let mut additional_update = self.build_commitment_no_status_check(logger);
3944 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3945 // strictly increasing by one, so decrement it here.
3946 self.context.latest_monitor_update_id = monitor_update.update_id;
3947 monitor_update.updates.append(&mut additional_update.updates);
3949 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3950 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3951 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3952 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3953 return_with_htlcs_to_fail!(Vec::new());
3956 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3957 (Some(mut additional_update), htlcs_to_fail) => {
3958 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want them to be
3959 // strictly increasing by one, so reset it here.
3960 self.context.latest_monitor_update_id = monitor_update.update_id;
3961 monitor_update.updates.append(&mut additional_update.updates);
3963 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3964 &self.context.channel_id(), release_state_str);
3966 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3967 return_with_htlcs_to_fail!(htlcs_to_fail);
3969 (None, htlcs_to_fail) => {
3970 if require_commitment {
3971 let mut additional_update = self.build_commitment_no_status_check(logger);
3973 // build_commitment_no_status_check may bump latest_monitor_update_id but we want them to be
3974 // strictly increasing by one, so decrement it here.
3975 self.context.latest_monitor_update_id = monitor_update.update_id;
3976 monitor_update.updates.append(&mut additional_update.updates);
3978 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3979 &self.context.channel_id(),
3980 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3983 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3984 return_with_htlcs_to_fail!(htlcs_to_fail);
3986 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3987 &self.context.channel_id(), release_state_str);
3989 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3990 return_with_htlcs_to_fail!(htlcs_to_fail);
3996 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3997 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3998 /// commitment update.
3999 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4000 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4001 where F::Target: FeeEstimator, L::Target: Logger
4003 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4004 assert!(msg_opt.is_none(), "We forced holding cell?");
4007 /// Adds a pending update to this channel. See the doc for send_htlc for
4008 /// further details on the optionality of the return value.
4009 /// If our balance is too low to cover the cost of the next commitment transaction at the
4010 /// new feerate, the update is cancelled.
4012 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4013 /// [`Channel`] if `force_holding_cell` is false.
4014 fn send_update_fee<F: Deref, L: Deref>(
4015 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4016 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4017 ) -> Option<msgs::UpdateFee>
4018 where F::Target: FeeEstimator, L::Target: Logger
4020 if !self.context.is_outbound() {
4021 panic!("Cannot send fee from inbound channel");
4023 if !self.context.is_usable() {
4024 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4026 if !self.context.is_live() {
4027 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4030 // Before proposing a feerate update, check that we can actually afford the new fee.
4031 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4032 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4033 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4034 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4035 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
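// buffer_fee_msat is the commitment fee at the proposed feerate assuming all current
// non-dust HTLCs plus those still in the holding cell, with CONCURRENT_INBOUND_HTLC_FEE_BUFFER
// extra HTLCs as headroom (presumably for HTLCs the counterparty may add before they process
// our update_fee).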
4036 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4037 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4038 //TODO: auto-close after a number of failures?
4039 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4043 // Note that we evaluate the pending HTLCs' "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4044 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4045 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4046 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4047 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4048 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4051 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4052 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4056 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4057 force_holding_cell = true;
4060 if force_holding_cell {
4061 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4065 debug_assert!(self.context.pending_update_fee.is_none());
4066 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4068 Some(msgs::UpdateFee {
4069 channel_id: self.context.channel_id,
4074 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4075 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be resent.
4077 /// No further message handling calls may be made until a channel_reestablish dance has completed.
4079 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4080 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4081 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4082 if self.context.channel_state.is_pre_funded_state() {
4086 if self.context.channel_state.is_peer_disconnected() {
4087 // While the below code should be idempotent, it's simpler to just return early, as
4088 // redundant disconnect events can fire, though they should be rare.
4092 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4093 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4096 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4097 // will be retransmitted.
4098 self.context.last_sent_closing_fee = None;
4099 self.context.pending_counterparty_closing_signed = None;
4100 self.context.closing_fee_limits = None;
4102 let mut inbound_drop_count = 0;
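// Inbound HTLCs still in RemoteAnnounced were never committed to, so we drop them here; the
// peer will re-send them after reconnection, and we rewind next_counterparty_htlc_id below
// so the re-sent HTLCs end up with the same IDs.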
4103 self.context.pending_inbound_htlcs.retain(|htlc| {
4105 InboundHTLCState::RemoteAnnounced(_) => {
4106 // They sent us an update_add_htlc but we never got the commitment_signed.
4107 // We'll tell them what commitment_signed we're expecting next and they'll drop
4108 // this HTLC accordingly
4109 inbound_drop_count += 1;
4112 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4113 // We received a commitment_signed updating this HTLC and (at least hopefully)
4114 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4115 // in response to it yet, so don't touch it.
4118 InboundHTLCState::Committed => true,
4119 InboundHTLCState::LocalRemoved(_) => {
4120 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4121 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4122 // (that we missed). Keep this around for now and if they tell us they missed
4123 // the commitment_signed we can re-transmit the update then.
4128 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4130 if let Some((_, update_state)) = self.context.pending_update_fee {
4131 if update_state == FeeUpdateState::RemoteAnnounced {
4132 debug_assert!(!self.context.is_outbound());
4133 self.context.pending_update_fee = None;
4137 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4138 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4139 // They sent us an update to remove this but haven't yet sent the corresponding
4140 // commitment_signed, we need to move it back to Committed and they can re-send
4141 // the update upon reconnection.
4142 htlc.state = OutboundHTLCState::Committed;
4146 self.context.sent_message_awaiting_response = None;
4148 self.context.channel_state.set_peer_disconnected();
4149 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4153 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4154 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4155 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4156 /// update completes (potentially immediately).
4157 /// The messages which were generated with the monitor update must *not* have been sent to the
4158 /// remote end, and must instead have been dropped. They will be regenerated when
4159 /// [`Self::monitor_updating_restored`] is called.
4161 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4162 /// [`chain::Watch`]: crate::chain::Watch
4163 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4164 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4165 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4166 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4167 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4169 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4170 self.context.monitor_pending_commitment_signed |= resend_commitment;
4171 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4172 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4173 self.context.monitor_pending_failures.append(&mut pending_fails);
4174 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4175 self.context.channel_state.set_monitor_update_in_progress();
4178 /// Indicates that the latest ChannelMonitor update has been committed by the client
4179 /// successfully and we should restore normal operation. Returns messages which should be sent
4180 /// to the remote side.
4181 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4182 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4183 user_config: &UserConfig, best_block_height: u32
4184 ) -> MonitorRestoreUpdates
4187 NS::Target: NodeSigner
4189 assert!(self.context.channel_state.is_monitor_update_in_progress());
4190 self.context.channel_state.clear_monitor_update_in_progress();
4192 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4193 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4194 // first received the funding_signed.
4195 let mut funding_broadcastable =
4196 if self.context.is_outbound() &&
4197 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4198 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4200 self.context.funding_transaction.take()
4202 // That said, if the funding transaction is already confirmed (i.e., we're active with a
4203 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4204 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4205 funding_broadcastable = None;
4208 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4209 // (and we assume the user never directly broadcasts the funding transaction and waits for
4210 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4211 // * an inbound channel that failed to persist the monitor on funding_created and we got
4212 // the funding transaction confirmed before the monitor was persisted, or
4213 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
4214 let channel_ready = if self.context.monitor_pending_channel_ready {
4215 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4216 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4217 self.context.monitor_pending_channel_ready = false;
4218 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4219 Some(msgs::ChannelReady {
4220 channel_id: self.context.channel_id(),
4221 next_per_commitment_point,
4222 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4226 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
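// Drain the forwards/failures/finalized-fulfills we queued up while the monitor update was
// pending; they are handed back to the caller for processing now that the update has been
// persisted.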
4228 let mut accepted_htlcs = Vec::new();
4229 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4230 let mut failed_htlcs = Vec::new();
4231 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4232 let mut finalized_claimed_htlcs = Vec::new();
4233 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4235 if self.context.channel_state.is_peer_disconnected() {
4236 self.context.monitor_pending_revoke_and_ack = false;
4237 self.context.monitor_pending_commitment_signed = false;
4238 return MonitorRestoreUpdates {
4239 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4240 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4244 let raa = if self.context.monitor_pending_revoke_and_ack {
4245 Some(self.get_last_revoke_and_ack())
4247 let commitment_update = if self.context.monitor_pending_commitment_signed {
4248 self.get_last_commitment_update_for_send(logger).ok()
4250 if commitment_update.is_some() {
4251 self.mark_awaiting_response();
4254 self.context.monitor_pending_revoke_and_ack = false;
4255 self.context.monitor_pending_commitment_signed = false;
4256 let order = self.context.resend_order.clone();
4257 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4258 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4259 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4260 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4261 MonitorRestoreUpdates {
4262 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4266 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4267 where F::Target: FeeEstimator, L::Target: Logger
4269 if self.context.is_outbound() {
4270 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4272 if self.context.channel_state.is_peer_disconnected() {
4273 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4275 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4277 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4278 self.context.update_time_counter += 1;
4279 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4280 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4281 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4282 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4283 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4284 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4285 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4286 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4287 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4288 msg.feerate_per_kw, holder_tx_dust_exposure)));
4290 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4291 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4292 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4298 /// Indicates that the signer may have some signatures for us, so we should retry if we're blocked.
4301 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4302 let commitment_update = if self.context.signer_pending_commitment_update {
4303 self.get_last_commitment_update_for_send(logger).ok()
4305 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4306 self.context.get_funding_signed_msg(logger).1
4308 let channel_ready = if funding_signed.is_some() {
4309 self.check_get_channel_ready(0)
4311 let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() {
4312 self.context.get_funding_created_msg(logger)
4315 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready",
4316 if commitment_update.is_some() { "a" } else { "no" },
4317 if funding_signed.is_some() { "a" } else { "no" },
4318 if funding_created.is_some() { "a" } else { "no" },
4319 if channel_ready.is_some() { "a" } else { "no" });
4321 SignerResumeUpdates {
4329 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4330 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4331 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4332 msgs::RevokeAndACK {
4333 channel_id: self.context.channel_id,
4334 per_commitment_secret,
4335 next_per_commitment_point,
4337 next_local_nonce: None,
4341 /// Gets the last commitment update for immediate sending to our peer.
4342 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4343 let mut update_add_htlcs = Vec::new();
4344 let mut update_fulfill_htlcs = Vec::new();
4345 let mut update_fail_htlcs = Vec::new();
4346 let mut update_fail_malformed_htlcs = Vec::new();
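// Rather than storing the update_* messages we previously sent, we regenerate them from the
// current HTLC state: outbound HTLCs still in LocalAnnounced become update_add_htlcs, and
// inbound HTLCs in LocalRemoved become fulfill/fail/fail_malformed messages according to
// their removal reason.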
4348 for htlc in self.context.pending_outbound_htlcs.iter() {
4349 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4350 update_add_htlcs.push(msgs::UpdateAddHTLC {
4351 channel_id: self.context.channel_id(),
4352 htlc_id: htlc.htlc_id,
4353 amount_msat: htlc.amount_msat,
4354 payment_hash: htlc.payment_hash,
4355 cltv_expiry: htlc.cltv_expiry,
4356 onion_routing_packet: (**onion_packet).clone(),
4357 skimmed_fee_msat: htlc.skimmed_fee_msat,
4358 blinding_point: htlc.blinding_point,
4363 for htlc in self.context.pending_inbound_htlcs.iter() {
4364 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4366 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4367 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4368 channel_id: self.context.channel_id(),
4369 htlc_id: htlc.htlc_id,
4370 reason: err_packet.clone()
4373 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4374 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4375 channel_id: self.context.channel_id(),
4376 htlc_id: htlc.htlc_id,
4377 sha256_of_onion: sha256_of_onion.clone(),
4378 failure_code: failure_code.clone(),
4381 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4382 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4383 channel_id: self.context.channel_id(),
4384 htlc_id: htlc.htlc_id,
4385 payment_preimage: payment_preimage.clone(),
4392 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4393 Some(msgs::UpdateFee {
4394 channel_id: self.context.channel_id(),
4395 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4399 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4400 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4401 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4402 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4403 if self.context.signer_pending_commitment_update {
4404 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4405 self.context.signer_pending_commitment_update = false;
4409 if !self.context.signer_pending_commitment_update {
4410 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4411 self.context.signer_pending_commitment_update = true;
4415 Ok(msgs::CommitmentUpdate {
4416 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4421 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4422 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4423 if self.context.channel_state.is_local_shutdown_sent() {
4424 assert!(self.context.shutdown_scriptpubkey.is_some());
4425 Some(msgs::Shutdown {
4426 channel_id: self.context.channel_id,
4427 scriptpubkey: self.get_closing_scriptpubkey(),
4432 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4433 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4435 /// Some links printed in log lines are included here to check them during build (when run with
4436 /// `cargo doc --document-private-items`):
4437 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4438 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4439 pub fn channel_reestablish<L: Deref, NS: Deref>(
4440 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4441 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4442 ) -> Result<ReestablishResponses, ChannelError>
4445 NS::Target: NodeSigner
4447 if !self.context.channel_state.is_peer_disconnected() {
4448 // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4449 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4450 // just close here instead of trying to recover.
4451 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4454 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4455 msg.next_local_commitment_number == 0 {
4456 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4459 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
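// Note: internally we count commitment transaction numbers *down* from
// INITIAL_COMMITMENT_NUMBER, while channel_reestablish carries counting-up indexes, hence
// conversions like the one above (and below for the counterparty's number).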
4460 if msg.next_remote_commitment_number > 0 {
4461 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4462 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4463 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4464 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4465 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4467 if msg.next_remote_commitment_number > our_commitment_transaction {
4468 macro_rules! log_and_panic {
4469 ($err_msg: expr) => {
4470 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4471 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4474 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4475 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4476 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4477 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4478 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4479 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4480 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4481 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4485 // Before we change the state of the channel, we check if the peer is sending a very old
4486 // commitment transaction number; if so, we send a warning message.
4487 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4488 return Err(ChannelError::Warn(format!(
4489 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4490 msg.next_remote_commitment_number,
4491 our_commitment_transaction
4495 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4496 // remaining cases either succeed or ErrorMessage-fail).
4497 self.context.channel_state.clear_peer_disconnected();
4498 self.context.sent_message_awaiting_response = None;
4500 let shutdown_msg = self.get_outbound_shutdown();
4502 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4504 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4505 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4506 if !self.context.channel_state.is_our_channel_ready() ||
4507 self.context.channel_state.is_monitor_update_in_progress() {
4508 if msg.next_remote_commitment_number != 0 {
4509 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4511 // Short circuit the whole handler as there is nothing we can resend them
4512 return Ok(ReestablishResponses {
4513 channel_ready: None,
4514 raa: None, commitment_update: None,
4515 order: RAACommitmentOrder::CommitmentFirst,
4516 shutdown_msg, announcement_sigs,
4520 // We have OurChannelReady set!
4521 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4522 return Ok(ReestablishResponses {
4523 channel_ready: Some(msgs::ChannelReady {
4524 channel_id: self.context.channel_id(),
4525 next_per_commitment_point,
4526 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4528 raa: None, commitment_update: None,
4529 order: RAACommitmentOrder::CommitmentFirst,
4530 shutdown_msg, announcement_sigs,
4534 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4535 // Remote isn't waiting on any RevokeAndACK from us!
4536 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4538 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4539 if self.context.channel_state.is_monitor_update_in_progress() {
4540 self.context.monitor_pending_revoke_and_ack = true;
4543 Some(self.get_last_revoke_and_ack())
4546 debug_assert!(false, "All values should have been handled in the four cases above");
4547 return Err(ChannelError::Close(format!(
4548 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4549 msg.next_remote_commitment_number,
4550 our_commitment_transaction
4554 // We advance cur_counterparty_commitment_transaction_number only upon receipt of
4555 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4556 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4557 // the corresponding revoke_and_ack back yet.
4558 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4559 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4560 self.mark_awaiting_response();
4562 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4564 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4565 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4566 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4567 Some(msgs::ChannelReady {
4568 channel_id: self.context.channel_id(),
4569 next_per_commitment_point,
4570 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4574 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4575 if required_revoke.is_some() {
4576 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4578 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4581 Ok(ReestablishResponses {
4582 channel_ready, shutdown_msg, announcement_sigs,
4583 raa: required_revoke,
4584 commitment_update: None,
4585 order: self.context.resend_order.clone(),
4587 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4588 if required_revoke.is_some() {
4589 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4591 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4594 if self.context.channel_state.is_monitor_update_in_progress() {
4595 self.context.monitor_pending_commitment_signed = true;
4596 Ok(ReestablishResponses {
4597 channel_ready, shutdown_msg, announcement_sigs,
4598 commitment_update: None, raa: None,
4599 order: self.context.resend_order.clone(),
4602 Ok(ReestablishResponses {
4603 channel_ready, shutdown_msg, announcement_sigs,
4604 raa: required_revoke,
4605 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4606 order: self.context.resend_order.clone(),
4609 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4610 Err(ChannelError::Close(format!(
4611 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4612 msg.next_local_commitment_number,
4613 next_counterparty_commitment_number,
4616 Err(ChannelError::Close(format!(
4617 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4618 msg.next_local_commitment_number,
4619 next_counterparty_commitment_number,
4624 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4625 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4626 /// at which point they will be recalculated.
4627 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4629 where F::Target: FeeEstimator
4631 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4633 // Propose a range from our current Background feerate to our Normal feerate plus our
4634 // force_close_avoidance_max_fee_satoshis.
4635 // If we fail to come to consensus, we'll have to force-close.
4636 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4637 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4638 // that we don't expect to need fee bumping
4639 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4640 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4642 // The spec requires that (when the channel does not have anchors) we only send absolute
4643 // channel fees no greater than the absolute channel fee on the current commitment
4644 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4646 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4646 // some force-closure by old nodes, but we wanted to close the channel anyway.
4648 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4649 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4650 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4651 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4654 // Note that technically we could end up with a lower minimum fee if one side's balance is
4655 // below our dust limit, causing the output to disappear. We don't bother handling this
4656 // case, however, as this should only happen if a channel is closed before any (material)
4657 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4658 // come to consensus with our counterparty on appropriate fees, however it should be a
4659 // relatively rare case. We can revisit this later, though note that in order to determine
4660 // if the funder's output is dust we have to know the absolute fee we're going to use.
4661 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4662 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
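// For example (purely illustrative numbers): at a proposed_feerate of 2_530 sat per 1000
// weight units and a tx_weight of 724 WU, the proposed fee is 2_530 * 724 / 1000 = 1_831
// sats (integer division).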
4663 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4664 // We always add force_close_avoidance_max_fee_satoshis to our normal
4665 // feerate-calculated fee, but allow the max to be overridden if we're using a
4666 // target feerate-calculated fee.
4667 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4668 proposed_max_feerate as u64 * tx_weight / 1000)
4670 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4673 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4674 self.context.closing_fee_limits.clone().unwrap()
4677 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4678 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4679 /// this point if we're the funder we should send the initial closing_signed, and in any case
4680 /// shutdown should complete within a reasonable timeframe.
4681 fn closing_negotiation_ready(&self) -> bool {
4682 self.context.closing_negotiation_ready()
4685 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4686 /// an Err if no progress is being made and the channel should be force-closed instead.
4687 /// Should be called on a one-minute timer.
4688 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4689 if self.closing_negotiation_ready() {
4690 if self.context.closing_signed_in_flight {
4691 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4693 self.context.closing_signed_in_flight = true;
4699 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4700 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4701 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4702 where F::Target: FeeEstimator, L::Target: Logger
4704 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4705 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4706 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4707 // that closing_negotiation_ready checks this case (as well as a few others).
4708 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4709 return Ok((None, None, None));
4712 if !self.context.is_outbound() {
4713 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4714 return self.closing_signed(fee_estimator, &msg);
4716 return Ok((None, None, None));
4719 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4720 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4721 if self.context.expecting_peer_commitment_signed {
4722 return Ok((None, None, None));
4725 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4727 assert!(self.context.shutdown_scriptpubkey.is_some());
4728 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4729 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4730 our_min_fee, our_max_fee, total_fee_satoshis);
4732 match &self.context.holder_signer {
4733 ChannelSignerType::Ecdsa(ecdsa) => {
4735 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4736 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4738 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4739 Ok((Some(msgs::ClosingSigned {
4740 channel_id: self.context.channel_id,
4741 fee_satoshis: total_fee_satoshis,
4743 fee_range: Some(msgs::ClosingSignedFeeRange {
4744 min_fee_satoshis: our_min_fee,
4745 max_fee_satoshis: our_max_fee,
4749 // TODO (taproot|arik)
4755 // Marks a channel as waiting for a response from the counterparty. If it's not received
4756 // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt a reconnection.
4758 fn mark_awaiting_response(&mut self) {
4759 self.context.sent_message_awaiting_response = Some(0);
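// The counter starts at zero and is incremented once per timer tick in
// should_disconnect_peer_awaiting_response below, where it is compared against
// DISCONNECT_PEER_AWAITING_RESPONSE_TICKS.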
4762 /// Determines whether we should disconnect the counterparty due to not receiving a response
4763 /// within our expected timeframe.
4765 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4766 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4767 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4770 // Don't disconnect when we're not waiting on a response.
4773 *ticks_elapsed += 1;
4774 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4778 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4779 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4781 if self.context.channel_state.is_peer_disconnected() {
4782 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4784 if self.context.channel_state.is_pre_funded_state() {
4785 // Spec says we should fail the connection, not the channel, but that's nonsense, there
4786 // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4787 // can do that via error message without getting a connection fail anyway...
4788 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4790 for htlc in self.context.pending_inbound_htlcs.iter() {
4791 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4792 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4795 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4797 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4798 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4801 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4802 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4803 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4806 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4809 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4810 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4811 // any further commitment updates after we set LocalShutdownSent.
4812 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4814 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4817 assert!(send_shutdown);
4818 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4819 Ok(scriptpubkey) => scriptpubkey,
4820 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4822 if !shutdown_scriptpubkey.is_compatible(their_features) {
4823 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4825 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4830 // From here on out, we may not fail!
4832 self.context.channel_state.set_remote_shutdown_sent();
4833 self.context.update_time_counter += 1;
4835 let monitor_update = if update_shutdown_script {
4836 self.context.latest_monitor_update_id += 1;
4837 let monitor_update = ChannelMonitorUpdate {
4838 update_id: self.context.latest_monitor_update_id,
4839 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4840 scriptpubkey: self.get_closing_scriptpubkey(),
4843 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4844 self.push_ret_blockable_mon_update(monitor_update)
4846 let shutdown = if send_shutdown {
4847 Some(msgs::Shutdown {
4848 channel_id: self.context.channel_id,
4849 scriptpubkey: self.get_closing_scriptpubkey(),
4853 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4854 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4855 // cell HTLCs and return them to fail the payment.
4856 self.context.holding_cell_update_fee = None;
4857 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4858 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4860 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4861 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4868 self.context.channel_state.set_local_shutdown_sent();
4869 self.context.update_time_counter += 1;
4871 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4874 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4875 let mut tx = closing_tx.trust().built_transaction().clone();
4877 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4879 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4880 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4881 let mut holder_sig = sig.serialize_der().to_vec();
4882 holder_sig.push(EcdsaSighashType::All as u8);
4883 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4884 cp_sig.push(EcdsaSighashType::All as u8);
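// The funding output is a 2-of-2 multisig whose redeemscript lists the funding pubkeys in
// lexicographic order (BOLT 3), so the witness signatures below must be pushed in that same order.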
4885 if funding_key[..] < counterparty_funding_key[..] {
4886 tx.input[0].witness.push(holder_sig);
4887 tx.input[0].witness.push(cp_sig);
4889 tx.input[0].witness.push(cp_sig);
4890 tx.input[0].witness.push(holder_sig);
4893 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4897 pub fn closing_signed<F: Deref>(
4898 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4899 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4900 where F::Target: FeeEstimator
4902 if !self.context.channel_state.is_both_sides_shutdown() {
4903 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4905 if self.context.channel_state.is_peer_disconnected() {
4906 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4908 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4909 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4911 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4912 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4915 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4916 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4919 if self.context.channel_state.is_monitor_update_in_progress() {
4920 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4921 return Ok((None, None, None));
4924 let funding_redeemscript = self.context.get_funding_redeemscript();
4925 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4926 if used_total_fee != msg.fee_satoshis {
4927 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4929 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4931 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4934 // The remote end may have decided to revoke their output due to inconsistent dust
4935 // limits, so check for that case by re-checking the signature here.
4936 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4937 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4938 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4942 for outp in closing_tx.trust().built_transaction().output.iter() {
4943 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4944 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4948 assert!(self.context.shutdown_scriptpubkey.is_some());
4949 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4950 if last_fee == msg.fee_satoshis {
4951 let shutdown_result = ShutdownResult {
4952 monitor_update: None,
4953 dropped_outbound_htlcs: Vec::new(),
4954 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4955 channel_id: self.context.channel_id,
4956 counterparty_node_id: self.context.counterparty_node_id,
4958 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4959 self.context.channel_state = ChannelState::ShutdownComplete;
4960 self.context.update_time_counter += 1;
4961 return Ok((None, Some(tx), Some(shutdown_result)));
4965 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4967 macro_rules! propose_fee {
4968 ($new_fee: expr) => {
4969 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4970 (closing_tx, $new_fee)
4972 self.build_closing_transaction($new_fee, false)
4975 return match &self.context.holder_signer {
4976 ChannelSignerType::Ecdsa(ecdsa) => {
4978 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4979 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4980 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4981 let shutdown_result = ShutdownResult {
4982 monitor_update: None,
4983 dropped_outbound_htlcs: Vec::new(),
4984 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4985 channel_id: self.context.channel_id,
4986 counterparty_node_id: self.context.counterparty_node_id,
4988 self.context.channel_state = ChannelState::ShutdownComplete;
4989 self.context.update_time_counter += 1;
4990 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4991 (Some(tx), Some(shutdown_result))
4996 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4997 Ok((Some(msgs::ClosingSigned {
4998 channel_id: self.context.channel_id,
4999 fee_satoshis: used_fee,
5001 fee_range: Some(msgs::ClosingSignedFeeRange {
5002 min_fee_satoshis: our_min_fee,
5003 max_fee_satoshis: our_max_fee,
5005 }), signed_tx, shutdown_result))
5007 // TODO (taproot|arik)
5014 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5015 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5016 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5018 if max_fee_satoshis < our_min_fee {
5019 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5021 if min_fee_satoshis > our_max_fee {
5022 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5025 if !self.context.is_outbound() {
5026 // They have to pay, so pick the highest fee in the overlapping range.
5027 // We should never set an upper bound aside from their full balance
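// (i.e. the channel value minus our own balance, with our msat balance rounded up to a whole
// satoshi, as the debug_assert below checks)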
5028 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5029 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5031 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5032 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5033 msg.fee_satoshis, our_min_fee, our_max_fee)));
5035 // The proposed fee is in our acceptable range, accept it and broadcast!
5036 propose_fee!(msg.fee_satoshis);
5039 // Old fee style negotiation. We don't bother to enforce whether they are complying
5040 // with the "making progress" requirements, we just comply and hope for the best.
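// Under the legacy (pre-fee_range) scheme each side is expected to converge by proposing fees
// strictly between the two most recent offers; here we simply clamp their proposal to our own
// min/max bounds.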
5041 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5042 if msg.fee_satoshis > last_fee {
5043 if msg.fee_satoshis < our_max_fee {
5044 propose_fee!(msg.fee_satoshis);
5045 } else if last_fee < our_max_fee {
5046 propose_fee!(our_max_fee);
5048 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5051 if msg.fee_satoshis > our_min_fee {
5052 propose_fee!(msg.fee_satoshis);
5053 } else if last_fee > our_min_fee {
5054 propose_fee!(our_min_fee);
5056 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5060 if msg.fee_satoshis < our_min_fee {
5061 propose_fee!(our_min_fee);
5062 } else if msg.fee_satoshis > our_max_fee {
5063 propose_fee!(our_max_fee);
5065 propose_fee!(msg.fee_satoshis);
5071 fn internal_htlc_satisfies_config(
5072 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5073 ) -> Result<(), (&'static str, u16)> {
5074 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5075 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
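// e.g. (illustrative) forwarding 1_000_000 msat with forwarding_fee_proportional_millionths = 100
// and forwarding_fee_base_msat = 1_000 requires a fee of 100 + 1_000 = 1_100 msat, so the incoming
// HTLC must carry at least 1_001_100 msat.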
5076 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5077 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5079 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5080 0x1000 | 12, // fee_insufficient
5083 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5085 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5086 0x1000 | 13, // incorrect_cltv_expiry
5092 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5093 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5094 /// unsuccessful, falls back to the previous one if one exists.
5095 pub fn htlc_satisfies_config(
5096 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5097 ) -> Result<(), (&'static str, u16)> {
5098 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5100 if let Some(prev_config) = self.context.prev_config() {
5101 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5108 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5109 self.context.cur_holder_commitment_transaction_number + 1
5112 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5113 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5116 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5117 self.context.cur_counterparty_commitment_transaction_number + 2
5121 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5122 &self.context.holder_signer
5126 pub fn get_value_stat(&self) -> ChannelValueStat {
5128 value_to_self_msat: self.context.value_to_self_msat,
5129 channel_value_msat: self.context.channel_value_satoshis * 1000,
5130 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5131 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5132 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5133 holding_cell_outbound_amount_msat: {
5135 for h in self.context.holding_cell_htlc_updates.iter() {
5137 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5145 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5146 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5150 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5151 /// Allowed in any state (including after shutdown)
5152 pub fn is_awaiting_monitor_update(&self) -> bool {
5153 self.context.channel_state.is_monitor_update_in_progress()
5156 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5157 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5158 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5159 self.context.blocked_monitor_updates[0].update.update_id - 1
5162 /// Returns the next blocked monitor update, if one exists, and a bool which indicates whether a
5163 /// further blocked monitor update exists after the next.
5164 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5165 if self.context.blocked_monitor_updates.is_empty() { return None; }
5166 Some((self.context.blocked_monitor_updates.remove(0).update,
5167 !self.context.blocked_monitor_updates.is_empty()))
5170 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5171 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5172 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5173 -> Option<ChannelMonitorUpdate> {
5174 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5175 if !release_monitor {
5176 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5185 pub fn blocked_monitor_updates_pending(&self) -> usize {
5186 self.context.blocked_monitor_updates.len()
5189 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5190 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5191 /// transaction. If the channel is inbound, this implies simply that the channel has not yet advanced state.
5193 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5194 if !self.is_awaiting_monitor_update() { return false; }
5196 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5197 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5199 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5200 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5201 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5204 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5205 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5206 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5207 // waiting for the initial monitor persistence. Thus, we check if our commitment
5208 // transaction numbers have both been iterated only exactly once (for the
5209 // funding_signed), and we're awaiting monitor update.
5211 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5212 // only way to get an awaiting-monitor-update state during initial funding is if the
5213 // initial monitor persistence is still pending).
5215 // Because deciding we're awaiting initial broadcast spuriously could result in
5216 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5217 // we hard-assert here, even in production builds.
5218 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5219 assert!(self.context.monitor_pending_channel_ready);
5220 assert_eq!(self.context.latest_monitor_update_id, 0);
5226 /// Returns true if our channel_ready has been sent
5227 pub fn is_our_channel_ready(&self) -> bool {
5228 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5229 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5232 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5233 pub fn received_shutdown(&self) -> bool {
5234 self.context.channel_state.is_remote_shutdown_sent()
5237 /// Returns true if we either initiated or agreed to shut down the channel.
5238 pub fn sent_shutdown(&self) -> bool {
5239 self.context.channel_state.is_local_shutdown_sent()
5242 /// Returns true if this channel is fully shut down. True here implies that no further actions
5243 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5244 /// will be handled appropriately by the chain monitor.
5245 pub fn is_shutdown(&self) -> bool {
5246 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5249 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5250 self.context.channel_update_status
5253 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5254 self.context.update_time_counter += 1;
5255 self.context.channel_update_status = status;
5258 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5260 // * always when a new block/transactions are confirmed with the new height
5261 // * when funding is signed with a height of 0
5262 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5266 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
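// e.g. (illustrative) a funding tx first confirmed at height 100 has 6 confirmations at a current
// height of 105; a zero or negative value below means a reorg has removed it from the chain.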
5267 if funding_tx_confirmations <= 0 {
5268 self.context.funding_tx_confirmation_height = 0;
5271 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5275 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5276 // channel_ready yet.
5277 if self.context.signer_pending_funding {
5281 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5282 // channel_ready until the entire batch is ready.
5283 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5284 self.context.channel_state.set_our_channel_ready();
5286 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5287 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5288 self.context.update_time_counter += 1;
5290 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5291 // We got a reorg but not enough to trigger a force close, just ignore.
5294 if self.context.funding_tx_confirmation_height != 0 &&
5295 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5297 // We should never see a funding transaction on-chain until we've received
5298 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5299 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5300 // however, may do this and we shouldn't treat it as a bug.
5301 #[cfg(not(fuzzing))]
5302 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5303 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5304 self.context.channel_state.to_u32());
5306 // We got a reorg but not enough to trigger a force close, just ignore.
5310 if need_commitment_update {
5311 if !self.context.channel_state.is_monitor_update_in_progress() {
5312 if !self.context.channel_state.is_peer_disconnected() {
5313 let next_per_commitment_point =
5314 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5315 return Some(msgs::ChannelReady {
5316 channel_id: self.context.channel_id,
5317 next_per_commitment_point,
5318 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5322 self.context.monitor_pending_channel_ready = true;
5328 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5329 /// In the first case, we store the confirmation height and calculate the short channel id.
5330 /// In the second, we simply return an Err indicating we need to be force-closed now.
5331 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5332 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5333 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5334 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5336 NS::Target: NodeSigner,
5339 let mut msgs = (None, None);
5340 if let Some(funding_txo) = self.context.get_funding_txo() {
5341 for &(index_in_block, tx) in txdata.iter() {
5342 // Check if the transaction is the expected funding transaction, and if it is,
5343 // check that it pays the right amount to the right script.
5344 if self.context.funding_tx_confirmation_height == 0 {
5345 if tx.txid() == funding_txo.txid {
5346 let txo_idx = funding_txo.index as usize;
5347 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5348 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5349 if self.context.is_outbound() {
5350 // If we generated the funding transaction and it doesn't match what it
5351 // should, the client is really broken and we should just panic and
5352 // tell them off. That said, because hash collisions happen with high
5353 // probability in fuzzing mode, if we're fuzzing we just close the
5354 // channel and move on.
5355 #[cfg(not(fuzzing))]
5356 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5358 self.context.update_time_counter += 1;
5359 let err_reason = "funding tx had wrong script/value or output index";
5360 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5362 if self.context.is_outbound() {
5363 if !tx.is_coin_base() {
5364 for input in tx.input.iter() {
5365 if input.witness.is_empty() {
5366 // We generated a malleable funding transaction, implying we've
5367 // just exposed ourselves to funds loss to our counterparty.
5368 #[cfg(not(fuzzing))]
5369 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5374 self.context.funding_tx_confirmation_height = height;
5375 self.context.funding_tx_confirmed_in = Some(*block_hash);
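// The short channel id packs the funding output's on-chain location per BOLT 7: 3 bytes of block
// height, 3 bytes of transaction index within the block, and 2 bytes of output index - hence the
// limits mentioned in the panic message below.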
5376 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5377 Ok(scid) => Some(scid),
5378 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5381 // If this is a coinbase transaction and not a 0-conf channel
5382 // we should update our min_depth to 100 to handle coinbase maturity
5383 if tx.is_coin_base() &&
5384 self.context.minimum_depth.unwrap_or(0) > 0 &&
5385 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5386 self.context.minimum_depth = Some(COINBASE_MATURITY);
5389 // If we allow 1-conf funding, we may need to check for channel_ready here and
5390 // send it immediately instead of waiting for a best_block_updated call (which
5391 // may have already happened for this block).
5392 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5393 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5394 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5395 msgs = (Some(channel_ready), announcement_sigs);
5398 for inp in tx.input.iter() {
5399 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5400 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5401 return Err(ClosureReason::CommitmentTxConfirmed);
5409 /// When a new block is connected, we check the height of the block against outbound holding
5410 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5411 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5412 /// handled by the ChannelMonitor.
5414 /// If we return Err, the channel may have been closed, at which point the standard
5415 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5418 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5420 pub fn best_block_updated<NS: Deref, L: Deref>(
5421 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5422 node_signer: &NS, user_config: &UserConfig, logger: &L
5423 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5425 NS::Target: NodeSigner,
5428 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5431 fn do_best_block_updated<NS: Deref, L: Deref>(
5432 &mut self, height: u32, highest_header_time: u32,
5433 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5434 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5436 NS::Target: NodeSigner,
5439 let mut timed_out_htlcs = Vec::new();
5440 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5441 // forward an HTLC when our counterparty should almost certainly just fail it for expiring too soon.
5443 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
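// Holding-cell HTLC adds whose cltv_expiry falls at or below this limit are failed back (returned
// in `timed_out_htlcs`) rather than forwarded, since the next hop would almost certainly reject them.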
5444 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5446 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5447 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5448 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5456 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5458 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5459 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5460 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5462 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5463 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5466 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5467 self.context.channel_state.is_our_channel_ready() {
5468 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5469 if self.context.funding_tx_confirmation_height == 0 {
5470 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5471 // zero if it has been reorged out, however in either case, our state flags
5472 // indicate we've already sent a channel_ready
5473 funding_tx_confirmations = 0;
5476 // If we've sent channel_ready (or have both sent and received channel_ready), and
5477 // the funding transaction has become unconfirmed,
5478 // close the channel and hope we can get the latest state on chain (because presumably
5479 // the funding transaction is at least still in the mempool of most nodes).
5481 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5482 // 0-conf channel, but not doing so may lead to the
5483 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5485 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5486 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5487 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5488 return Err(ClosureReason::ProcessingError { err: err_reason });
5490 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5491 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5492 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5493 // If funding_tx_confirmed_in is unset, the channel must not be active
5494 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5495 assert!(!self.context.channel_state.is_our_channel_ready());
5496 return Err(ClosureReason::FundingTimedOut);
5499 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5500 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5502 Ok((None, timed_out_htlcs, announcement_sigs))
5505 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5506 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5507 /// before the channel has reached channel_ready and we can just wait for more blocks.
5508 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5509 if self.context.funding_tx_confirmation_height != 0 {
5510 // We handle the funding disconnection by calling best_block_updated with a height one
5511 // below where our funding was connected, implying a reorg back to conf_height - 1.
5512 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5513 // We use the time field to bump the current time we set on channel updates if it's
5514 // larger. If we don't know that time has moved forward, we can just set it to the last
5515 // time we saw and it will be ignored.
5516 let best_time = self.context.update_time_counter;
5517 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5518 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5519 assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
5520 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5521 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5527 // We never learned about the funding confirmation anyway, just ignore
5532 // Methods to get unprompted messages to send to the remote end (or where we already returned
5533 // something in the handler for the message that prompted this message):
5535 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5536 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5537 /// directions). Should be used for both broadcasted announcements and in response to an
5538 /// AnnouncementSignatures message from the remote peer.
5540 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5543 /// This will only return ChannelError::Ignore upon failure.
5545 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5546 fn get_channel_announcement<NS: Deref>(
5547 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5548 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5549 if !self.context.config.announced_channel {
5550 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5552 if !self.context.is_usable() {
5553 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5556 let short_channel_id = self.context.get_short_channel_id()
5557 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5558 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5559 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5560 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5561 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
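// BOLT 7 requires node_id_1 to be the lexicographically lesser of the two compressed node ids;
// the bitcoin (funding) keys below follow the same ordering.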
5563 let msg = msgs::UnsignedChannelAnnouncement {
5564 features: channelmanager::provided_channel_features(&user_config),
5567 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5568 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5569 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5570 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5571 excess_data: Vec::new(),
5577 fn get_announcement_sigs<NS: Deref, L: Deref>(
5578 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5579 best_block_height: u32, logger: &L
5580 ) -> Option<msgs::AnnouncementSignatures>
5582 NS::Target: NodeSigner,
5585 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
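// Not yet announceable: BOLT 7 requires six confirmations before announcement_signatures, i.e. the
// tip must be at least five blocks past the confirmation height (e.g., illustratively, a channel
// confirmed at height 100 becomes announceable once the tip reaches 105).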
5589 if !self.context.is_usable() {
5593 if self.context.channel_state.is_peer_disconnected() {
5594 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5598 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5602 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5603 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5606 log_trace!(logger, "{:?}", e);
5610 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5612 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5617 match &self.context.holder_signer {
5618 ChannelSignerType::Ecdsa(ecdsa) => {
5619 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5621 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5626 let short_channel_id = match self.context.get_short_channel_id() {
5628 None => return None,
5631 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5633 Some(msgs::AnnouncementSignatures {
5634 channel_id: self.context.channel_id(),
5636 node_signature: our_node_sig,
5637 bitcoin_signature: our_bitcoin_sig,
5640 // TODO (taproot|arik)
5646 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
5648 fn sign_channel_announcement<NS: Deref>(
5649 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5650 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5651 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5652 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5653 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5654 let were_node_one = announcement.node_id_1 == our_node_key;
5656 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5657 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5658 match &self.context.holder_signer {
5659 ChannelSignerType::Ecdsa(ecdsa) => {
5660 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5661 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5662 Ok(msgs::ChannelAnnouncement {
5663 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5664 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5665 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5666 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5667 contents: announcement,
5670 // TODO (taproot|arik)
5675 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5679 /// Processes an incoming announcement_signatures message, providing a fully-signed
5680 /// channel_announcement message which we can broadcast and storing our counterparty's
5681 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5682 pub fn announcement_signatures<NS: Deref>(
5683 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5684 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5685 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5686 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5688 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5690 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5691 return Err(ChannelError::Close(format!(
5692 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5693 &announcement, self.context.get_counterparty_node_id())));
5695 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5696 return Err(ChannelError::Close(format!(
5697 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5698 &announcement, self.context.counterparty_funding_pubkey())));
5701 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5702 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5703 return Err(ChannelError::Ignore(
5704 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5707 self.sign_channel_announcement(node_signer, announcement)
5710 /// Gets a signed channel_announcement for this channel, if we previously received an
5711 /// announcement_signatures from our counterparty.
5712 pub fn get_signed_channel_announcement<NS: Deref>(
5713 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5714 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5715 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5718 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5720 Err(_) => return None,
5722 match self.sign_channel_announcement(node_signer, announcement) {
5723 Ok(res) => Some(res),
5728 /// May panic if called on a channel that wasn't immediately-previously
5729 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5730 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5731 assert!(self.context.channel_state.is_peer_disconnected());
5732 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5733 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5734 // current to_remote balances. However, it no longer has any use, and thus is now simply
5735 // set to a dummy (but valid, as required by the spec) public key.
5736 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5737 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5738 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5739 let mut pk = [2; 33]; pk[1] = 0xff;
5740 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5741 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5742 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5743 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5746 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5749 self.mark_awaiting_response();
5750 msgs::ChannelReestablish {
5751 channel_id: self.context.channel_id(),
5752 // The protocol has two different commitment number concepts - the "commitment
5753 // transaction number", which starts from 0 and counts up, and the "revocation key
5754 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5755 // commitment transaction numbers by the index which will be used to reveal the
5756 // revocation key for that commitment transaction, which means we have to convert them
5757 // to protocol-level commitment numbers here...
5759 // next_local_commitment_number is the next commitment_signed number we expect to
5760 // receive (indicating if they need to resend one that we missed).
5761 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
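// e.g. (illustrative) right after funding_signed, cur_holder_commitment_transaction_number is
// INITIAL_COMMITMENT_NUMBER - 1, so we send next_local_commitment_number = 1, matching BOLT 2's
// expectation for a channel with no further commitment updates.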
5762 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5763 // receive, however we track it by the next commitment number for a remote transaction
5764 // (which is one further, as they always revoke previous commitment transaction, not
5765 // the one we send) so we have to decrement by 1. Note that if
5766 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5767 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5769 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5770 your_last_per_commitment_secret: remote_last_secret,
5771 my_current_per_commitment_point: dummy_pubkey,
5772 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5773 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5774 // txid of that interactive transaction, else we MUST NOT set it.
5775 next_funding_txid: None,
5780 // Send stuff to our remote peers:
5782 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5783 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5784 /// commitment update.
5786 /// `Err`s will only be [`ChannelError::Ignore`].
5787 pub fn queue_add_htlc<F: Deref, L: Deref>(
5788 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5789 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5790 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5791 ) -> Result<(), ChannelError>
5792 where F::Target: FeeEstimator, L::Target: Logger
5795 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5796 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5797 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5799 if let ChannelError::Ignore(_) = err { /* fine */ }
5800 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5805 /// Adds a pending outbound HTLC to this channel. Note that you probably want
5806 /// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
5808 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on the wire:
5810 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5811 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5813 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5814 /// we may not yet have sent the previous commitment update messages and will need to
5815 /// regenerate them.
5817 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5818 /// on this [`Channel`] if `force_holding_cell` is false.
5820 /// `Err`s will only be [`ChannelError::Ignore`].
5821 fn send_htlc<F: Deref, L: Deref>(
5822 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5823 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5824 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5825 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5826 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5827 where F::Target: FeeEstimator, L::Target: Logger
5829 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5830 self.context.channel_state.is_local_shutdown_sent() ||
5831 self.context.channel_state.is_remote_shutdown_sent()
5833 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5835 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5836 if amount_msat > channel_total_msat {
5837 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5840 if amount_msat == 0 {
5841 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5844 let available_balances = self.context.get_available_balances(fee_estimator);
5845 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5846 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5847 available_balances.next_outbound_htlc_minimum_msat)));
5850 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5851 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5852 available_balances.next_outbound_htlc_limit_msat)));
5855 if self.context.channel_state.is_peer_disconnected() {
5856 // Note that this should never really happen: being !is_live() on receipt of an
5857 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5858 // the user to send directly into a !is_live() channel. However, if we
5859 // disconnected during the time the previous hop was doing the commitment dance we may
5860 // end up getting here after the forwarding delay. In any case, returning an
5861 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5862 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5865 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5866 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5867 payment_hash, amount_msat,
5868 if force_holding_cell { "into holding cell" }
5869 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5870 else { "to peer" });
5872 if need_holding_cell {
5873 force_holding_cell = true;
5876 // Now update local state:
5877 if force_holding_cell {
5878 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5883 onion_routing_packet,
5890 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5891 htlc_id: self.context.next_holder_htlc_id,
5893 payment_hash: payment_hash.clone(),
5895 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5901 let res = msgs::UpdateAddHTLC {
5902 channel_id: self.context.channel_id,
5903 htlc_id: self.context.next_holder_htlc_id,
5907 onion_routing_packet,
5911 self.context.next_holder_htlc_id += 1;
5916 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5917 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5918 // We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
5919 // fail to generate this commitment, we are still at a point where upgrading their status is acceptable.
5921 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5922 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5923 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5925 if let Some(state) = new_state {
5926 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5930 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5931 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5932 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5933 // Grab the preimage, if it exists, instead of cloning
5934 let mut reason = OutboundHTLCOutcome::Success(None);
5935 mem::swap(outcome, &mut reason);
5936 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5939 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5940 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5941 debug_assert!(!self.context.is_outbound());
5942 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5943 self.context.feerate_per_kw = feerate;
5944 self.context.pending_update_fee = None;
5947 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5949 let (mut htlcs_ref, counterparty_commitment_tx) =
5950 self.build_commitment_no_state_update(logger);
5951 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5952 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5953 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5955 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5956 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5959 self.context.latest_monitor_update_id += 1;
5960 let monitor_update = ChannelMonitorUpdate {
5961 update_id: self.context.latest_monitor_update_id,
5962 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5963 commitment_txid: counterparty_commitment_txid,
5964 htlc_outputs: htlcs.clone(),
5965 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5966 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5967 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5968 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5969 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5972 self.context.channel_state.set_awaiting_remote_revoke();
5976 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5977 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5978 where L::Target: Logger
5980 let counterparty_keys = self.context.build_remote_transaction_keys();
5981 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5982 let counterparty_commitment_tx = commitment_stats.tx;
5984 #[cfg(any(test, fuzzing))]
5986 if !self.context.is_outbound() {
5987 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5988 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5989 if let Some(info) = projected_commit_tx_info {
5990 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5991 if info.total_pending_htlcs == total_pending_htlcs
5992 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5993 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5994 && info.feerate == self.context.feerate_per_kw {
5995 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5996 assert_eq!(actual_fee, info.fee);
6002 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6005 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6006 /// generation when we shouldn't change HTLC/channel state.
6007 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6008 // Run the fee-consistency checks in `build_commitment_no_state_update` (test/fuzzing only).
6009 #[cfg(any(test, fuzzing))]
6010 self.build_commitment_no_state_update(logger);
6012 let counterparty_keys = self.context.build_remote_transaction_keys();
6013 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6014 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6016 match &self.context.holder_signer {
6017 ChannelSignerType::Ecdsa(ecdsa) => {
6018 let (signature, htlc_signatures);
6021 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6022 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6026 let res = ecdsa.sign_counterparty_commitment(
6027 &commitment_stats.tx,
6028 commitment_stats.inbound_htlc_preimages,
6029 commitment_stats.outbound_htlc_preimages,
6030 &self.context.secp_ctx,
6031 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6033 htlc_signatures = res.1;
6035 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6036 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6037 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6038 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6040 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6041 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6042 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6043 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6044 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6045 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6049 Ok((msgs::CommitmentSigned {
6050 channel_id: self.context.channel_id,
6054 partial_signature_with_nonce: None,
6055 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6057 // TODO (taproot|arik)
6063 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6064 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6066 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6067 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
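///
/// A rough internal usage sketch (the surrounding variables are assumptions for
/// illustration, not part of this API):
/// ```ignore
/// let monitor_update_opt = chan.send_htlc_and_commit(
/// 	amount_msat, payment_hash, cltv_expiry, htlc_source, onion_packet,
/// 	None /* skimmed_fee_msat */, &fee_estimator, &logger,
/// )?;
/// if let Some(monitor_update) = monitor_update_opt {
/// 	// The caller is responsible for persisting and applying this monitor update.
/// }
/// ```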
6068 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6069 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6070 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6071 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6072 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6073 where F::Target: FeeEstimator, L::Target: Logger
6075 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6076 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6077 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6080 let monitor_update = self.build_commitment_no_status_check(logger);
6081 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6082 Ok(self.push_ret_blockable_mon_update(monitor_update))
6088 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually occurred.
6090 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6091 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6092 fee_base_msat: msg.contents.fee_base_msat,
6093 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6094 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6096 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6098 self.context.counterparty_forwarding_info = new_forwarding_info;
6104 /// Begins the shutdown process, getting a message for the remote peer and returning all
6105 /// holding cell HTLCs for payment failure.
6107 /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]), in which case no
6108 /// [`ChannelMonitorUpdate`] will be returned.
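///
/// A rough internal usage sketch (the surrounding variables are assumptions for
/// illustration, not part of this API):
/// ```ignore
/// let (shutdown_msg, monitor_update_opt, dropped_htlcs, shutdown_result_opt) =
/// 	chan.get_shutdown(&signer_provider, &their_features, None, None)?;
/// for (htlc_source, payment_hash) in dropped_htlcs {
/// 	// These HTLCs were still sitting in the holding cell and must be failed back.
/// }
/// ```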
6109 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6110 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6111 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>, Option<ShutdownResult>), APIError>
6113 for htlc in self.context.pending_outbound_htlcs.iter() {
6114 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6115 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6118 if self.context.channel_state.is_local_shutdown_sent() {
6119 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6121 else if self.context.channel_state.is_remote_shutdown_sent() {
6122 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6124 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6125 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6127 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6128 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6129 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6132 // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown
6133 // script is set; we just force-close and call it a day.
6134 let mut chan_closed = false;
6135 if self.context.channel_state.is_pre_funded_state() {
6139 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6141 None if !chan_closed => {
6142 // use override shutdown script if provided
6143 let shutdown_scriptpubkey = match override_shutdown_script {
6144 Some(script) => script,
6146 // otherwise, use the shutdown scriptpubkey provided by the signer
6147 match signer_provider.get_shutdown_scriptpubkey() {
6148 Ok(scriptpubkey) => scriptpubkey,
6149 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6153 if !shutdown_scriptpubkey.is_compatible(their_features) {
6154 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6156 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6162 // From here on out, we may not fail!
6163 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6164 let shutdown_result = if self.context.channel_state.is_pre_funded_state() {
6165 let shutdown_result = ShutdownResult {
6166 monitor_update: None,
6167 dropped_outbound_htlcs: Vec::new(),
6168 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
6169 channel_id: self.context.channel_id,
6170 counterparty_node_id: self.context.counterparty_node_id,
6172 self.context.channel_state = ChannelState::ShutdownComplete;
6173 Some(shutdown_result)
6175 self.context.channel_state.set_local_shutdown_sent();
6178 self.context.update_time_counter += 1;
6180 let monitor_update = if update_shutdown_script {
6181 self.context.latest_monitor_update_id += 1;
6182 let monitor_update = ChannelMonitorUpdate {
6183 update_id: self.context.latest_monitor_update_id,
6184 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6185 scriptpubkey: self.get_closing_scriptpubkey(),
6188 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6189 self.push_ret_blockable_mon_update(monitor_update)
6191 let shutdown = msgs::Shutdown {
6192 channel_id: self.context.channel_id,
6193 scriptpubkey: self.get_closing_scriptpubkey(),
6196 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6197 // our shutdown until we've committed all of the pending changes.
6198 self.context.holding_cell_update_fee = None;
6199 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6200 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6202 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6203 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6210 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6211 "we can't both complete shutdown and return a monitor update");
6213 Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result))
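/// Returns an iterator over the source and payment hash of every outbound HTLC which is
/// currently pending on this channel, including HTLC additions still sitting in the
/// holding cell.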
6216 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6217 self.context.holding_cell_htlc_updates.iter()
6218 .flat_map(|htlc_update| {
6220 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6221 => Some((source, payment_hash)),
6225 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6229 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6230 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6231 pub context: ChannelContext<SP>,
6232 pub unfunded_context: UnfundedChannelContext,
6235 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
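/// Creates a new outbound channel with the given value, push amount, and user config,
/// deriving fresh channel keys from the `signer_provider`. Returns an [`APIError`] if the
/// requested parameters are unreasonable, e.g. the value exceeds what the peer supports,
/// the push amount exceeds the channel value, or the funder cannot afford the fee on the
/// initial commitment transaction.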
6236 pub fn new<ES: Deref, F: Deref>(
6237 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6238 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6239 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6240 ) -> Result<OutboundV1Channel<SP>, APIError>
6241 where ES::Target: EntropySource,
6242 F::Target: FeeEstimator
6244 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6245 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6246 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6247 let pubkeys = holder_signer.pubkeys().clone();
6249 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6250 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6252 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6253 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6255 let channel_value_msat = channel_value_satoshis * 1000;
6256 if push_msat > channel_value_msat {
6257 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6259 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6260 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6262 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6263 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6264 // Protocol-level safety check; this should never happen because of
6265 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6266 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6269 let channel_type = Self::get_initial_channel_type(&config, their_features);
6270 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6272 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6273 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6275 (ConfirmationTarget::NonAnchorChannelFee, 0)
6277 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6279 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6280 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6281 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6282 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6285 let mut secp_ctx = Secp256k1::new();
6286 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6288 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6289 match signer_provider.get_shutdown_scriptpubkey() {
6290 Ok(scriptpubkey) => Some(scriptpubkey),
6291 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6295 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6296 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6297 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6301 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6302 Ok(script) => script,
6303 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6306 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6309 context: ChannelContext {
6312 config: LegacyChannelConfig {
6313 options: config.channel_config.clone(),
6314 announced_channel: config.channel_handshake_config.announced_channel,
6315 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6320 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6322 channel_id: temporary_channel_id,
6323 temporary_channel_id: Some(temporary_channel_id),
6324 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6325 announcement_sigs_state: AnnouncementSigsState::NotSent,
6327 channel_value_satoshis,
6329 latest_monitor_update_id: 0,
6331 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6332 shutdown_scriptpubkey,
6335 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6336 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6339 pending_inbound_htlcs: Vec::new(),
6340 pending_outbound_htlcs: Vec::new(),
6341 holding_cell_htlc_updates: Vec::new(),
6342 pending_update_fee: None,
6343 holding_cell_update_fee: None,
6344 next_holder_htlc_id: 0,
6345 next_counterparty_htlc_id: 0,
6346 update_time_counter: 1,
6348 resend_order: RAACommitmentOrder::CommitmentFirst,
6350 monitor_pending_channel_ready: false,
6351 monitor_pending_revoke_and_ack: false,
6352 monitor_pending_commitment_signed: false,
6353 monitor_pending_forwards: Vec::new(),
6354 monitor_pending_failures: Vec::new(),
6355 monitor_pending_finalized_fulfills: Vec::new(),
6357 signer_pending_commitment_update: false,
6358 signer_pending_funding: false,
6360 #[cfg(debug_assertions)]
6361 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6362 #[cfg(debug_assertions)]
6363 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6365 last_sent_closing_fee: None,
6366 pending_counterparty_closing_signed: None,
6367 expecting_peer_commitment_signed: false,
6368 closing_fee_limits: None,
6369 target_closing_feerate_sats_per_kw: None,
6371 funding_tx_confirmed_in: None,
6372 funding_tx_confirmation_height: 0,
6373 short_channel_id: None,
6374 channel_creation_height: current_chain_height,
6376 feerate_per_kw: commitment_feerate,
6377 counterparty_dust_limit_satoshis: 0,
6378 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6379 counterparty_max_htlc_value_in_flight_msat: 0,
6380 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6381 counterparty_selected_channel_reserve_satoshis: None, // Filled in on receipt of accept_channel
6382 holder_selected_channel_reserve_satoshis,
6383 counterparty_htlc_minimum_msat: 0,
6384 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6385 counterparty_max_accepted_htlcs: 0,
6386 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6387 minimum_depth: None, // Filled in on receipt of accept_channel
6389 counterparty_forwarding_info: None,
6391 channel_transaction_parameters: ChannelTransactionParameters {
6392 holder_pubkeys: pubkeys,
6393 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6394 is_outbound_from_holder: true,
6395 counterparty_parameters: None,
6396 funding_outpoint: None,
6397 channel_type_features: channel_type.clone()
6399 funding_transaction: None,
6400 is_batch_funding: None,
6402 counterparty_cur_commitment_point: None,
6403 counterparty_prev_commitment_point: None,
6404 counterparty_node_id,
6406 counterparty_shutdown_scriptpubkey: None,
6408 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6410 channel_update_status: ChannelUpdateStatus::Enabled,
6411 closing_signed_in_flight: false,
6413 announcement_sigs: None,
6415 #[cfg(any(test, fuzzing))]
6416 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6417 #[cfg(any(test, fuzzing))]
6418 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6420 workaround_lnd_bug_4006: None,
6421 sent_message_awaiting_response: None,
6423 latest_inbound_scid_alias: None,
6424 outbound_scid_alias,
6426 channel_pending_event_emitted: false,
6427 channel_ready_event_emitted: false,
6429 #[cfg(any(test, fuzzing))]
6430 historical_inbound_htlc_fulfills: HashSet::new(),
6435 blocked_monitor_updates: Vec::new(),
6437 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6441 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6442 /// a funding_created message for the remote peer.
6443 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6444 /// or if called on an inbound channel.
6445 /// Note that channel_id changes during this call!
6446 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6447 /// If an Err is returned, it is a ChannelError::Close.
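///
/// A rough internal usage sketch (the surrounding variables are assumptions for
/// illustration, not part of this API):
/// ```ignore
/// let (chan, funding_created_opt) = outbound_chan
/// 	.get_funding_created(funding_transaction, funding_txo, false, &logger)
/// 	.map_err(|(_chan, err)| err)?;
/// // Send `funding_created_opt` to the peer if present; if it is `None` the signer is
/// // still pending and `signer_pending_funding` has been set so it can be sent later.
/// ```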
6448 pub fn get_funding_created<L: Deref>(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6449 -> Result<(Channel<SP>, Option<msgs::FundingCreated>), (Self, ChannelError)> where L::Target: Logger {
6450 if !self.context.is_outbound() {
6451 panic!("Tried to create outbound funding_created message on an inbound channel!");
6454 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6455 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6457 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6459 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6460 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6461 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6462 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6465 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6466 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6468 // Now that we're past error-generating stuff, update our local state:
6470 self.context.channel_state = ChannelState::FundingNegotiated;
6471 self.context.channel_id = funding_txo.to_channel_id();
6473 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6474 // We can skip this if it is a zero-conf channel.
6475 if funding_transaction.is_coin_base() &&
6476 self.context.minimum_depth.unwrap_or(0) > 0 &&
6477 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6478 self.context.minimum_depth = Some(COINBASE_MATURITY);
6481 self.context.funding_transaction = Some(funding_transaction);
6482 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6484 let funding_created = self.context.get_funding_created_msg(logger);
6485 if funding_created.is_none() {
6486 if !self.context.signer_pending_funding {
6487 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6488 self.context.signer_pending_funding = true;
6492 let channel = Channel {
6493 context: self.context,
6496 Ok((channel, funding_created))
6499 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6500 // The default channel type (ie the first one we try) depends on whether the channel is
6501 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6502 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6503 // with no other changes, and fall back to `only_static_remotekey`.
6504 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6505 if !config.channel_handshake_config.announced_channel &&
6506 config.channel_handshake_config.negotiate_scid_privacy &&
6507 their_features.supports_scid_privacy() {
6508 ret.set_scid_privacy_required();
6511 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6512 // set it now. If they don't understand it, we'll fall back to our default of
6513 // `only_static_remotekey`.
6514 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6515 their_features.supports_anchors_zero_fee_htlc_tx() {
6516 ret.set_anchors_zero_fee_htlc_tx_required();
6522 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6523 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6524 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
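///
/// A rough internal usage sketch (the surrounding variables are assumptions for
/// illustration, not part of this API):
/// ```ignore
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
/// 	Ok(open_channel_msg) => {
/// 		// Retry the handshake with the less-featureful channel type.
/// 	},
/// 	Err(()) => {
/// 		// We've exhausted our options; fail the channel.
/// 	},
/// }
/// ```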
6525 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6526 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6527 ) -> Result<msgs::OpenChannel, ()>
6529 F::Target: FeeEstimator
6531 if !self.context.is_outbound() ||
6533 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6534 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6539 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6540 // We've exhausted our options
6543 // We support opening a few different types of channels. Try removing our additional
6544 // features one by one until we've either arrived at our default or the counterparty has accepted.
6547 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6548 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6549 // checks whether the counterparty supports every feature, this would only happen if the
6550 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
6552 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6553 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6554 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6555 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6556 } else if self.context.channel_type.supports_scid_privacy() {
6557 self.context.channel_type.clear_scid_privacy();
6559 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6561 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6562 Ok(self.get_open_channel(chain_hash))
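/// Generates the `open_channel` message to send to the counterparty for this outbound
/// channel. Panics if the channel is not outbound or if the handshake has already
/// progressed past the point where sending `open_channel` makes sense.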
6565 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6566 if !self.context.is_outbound() {
6567 panic!("Tried to open a channel for an inbound channel?");
6569 if self.context.have_received_message() {
6570 panic!("Cannot generate an open_channel after we've moved forward");
6573 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6574 panic!("Tried to send an open_channel for a channel that has already advanced");
6577 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6578 let keys = self.context.get_holder_pubkeys();
6582 temporary_channel_id: self.context.channel_id,
6583 funding_satoshis: self.context.channel_value_satoshis,
6584 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6585 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6586 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6587 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6588 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6589 feerate_per_kw: self.context.feerate_per_kw as u32,
6590 to_self_delay: self.context.get_holder_selected_contest_delay(),
6591 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6592 funding_pubkey: keys.funding_pubkey,
6593 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6594 payment_point: keys.payment_point,
6595 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6596 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6597 first_per_commitment_point,
6598 channel_flags: if self.context.config.announced_channel {1} else {0},
6599 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6600 Some(script) => script.clone().into_inner(),
6601 None => Builder::new().into_script(),
6603 channel_type: Some(self.context.channel_type.clone()),
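/// Handles the counterparty's `accept_channel` message, validating its fields against both
/// protocol-level and user-configured limits and recording the negotiated parameters (dust
/// limit, reserve, HTLC limits, keys, and channel type). Returns a `ChannelError::Close` if
/// any check fails.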
6608 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6609 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6611 // Check sanity of message fields:
6612 if !self.context.is_outbound() {
6613 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6615 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6616 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6618 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6619 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6621 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6622 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6624 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6625 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6627 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6628 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6629 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6631 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6632 if msg.htlc_minimum_msat >= full_channel_value_msat {
6633 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6635 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6636 if msg.to_self_delay > max_delay_acceptable {
6637 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6639 if msg.max_accepted_htlcs < 1 {
6640 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6642 if msg.max_accepted_htlcs > MAX_HTLCS {
6643 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6646 // Now check against optional parameters as set by config...
6647 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6648 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6650 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6651 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6653 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6654 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6656 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6657 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6659 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6660 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6662 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6663 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6665 if msg.minimum_depth > peer_limits.max_minimum_depth {
6666 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6669 if let Some(ty) = &msg.channel_type {
6670 if *ty != self.context.channel_type {
6671 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6673 } else if their_features.supports_channel_type() {
6674 // Assume they've accepted the channel type as they said they understand it.
6676 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6677 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6678 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6680 self.context.channel_type = channel_type.clone();
6681 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6684 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6685 match &msg.shutdown_scriptpubkey {
6686 &Some(ref script) => {
6687 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6688 if script.len() == 0 {
6691 if !script::is_bolt2_compliant(&script, their_features) {
6692 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6694 Some(script.clone())
6697 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
6699 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6704 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6705 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6706 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6707 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6708 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6710 if peer_limits.trust_own_funding_0conf {
6711 self.context.minimum_depth = Some(msg.minimum_depth);
6713 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6716 let counterparty_pubkeys = ChannelPublicKeys {
6717 funding_pubkey: msg.funding_pubkey,
6718 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6719 payment_point: msg.payment_point,
6720 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6721 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6724 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6725 selected_contest_delay: msg.to_self_delay,
6726 pubkeys: counterparty_pubkeys,
6729 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6730 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6732 self.context.channel_state = ChannelState::NegotiatingFunding(
6733 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6735 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6741 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6742 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6743 pub context: ChannelContext<SP>,
6744 pub unfunded_context: UnfundedChannelContext,
6747 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6748 /// Creates a new channel from a remote side's request for one.
6749 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6750 pub fn new<ES: Deref, F: Deref, L: Deref>(
6751 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6752 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6753 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6754 current_chain_height: u32, logger: &L, is_0conf: bool,
6755 ) -> Result<InboundV1Channel<SP>, ChannelError>
6756 where ES::Target: EntropySource,
6757 F::Target: FeeEstimator,
6760 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6761 let announced_channel = (msg.channel_flags & 1) == 1;
6763 // First check the channel type is known, failing before we do anything else if we don't
6764 // support this channel type.
6765 let channel_type = if let Some(channel_type) = &msg.channel_type {
6766 if channel_type.supports_any_optional_bits() {
6767 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6770 // We only support the channel types defined by the `ChannelManager` in
6771 // `provided_channel_type_features`. The channel type must always support
6772 // `static_remote_key`.
6773 if !channel_type.requires_static_remote_key() {
6774 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6776 // Make sure we support all of the features behind the channel type.
6777 if !channel_type.is_subset(our_supported_features) {
6778 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6780 if channel_type.requires_scid_privacy() && announced_channel {
6781 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6783 channel_type.clone()
6785 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6786 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6787 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6792 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6793 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6794 let pubkeys = holder_signer.pubkeys().clone();
6795 let counterparty_pubkeys = ChannelPublicKeys {
6796 funding_pubkey: msg.funding_pubkey,
6797 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6798 payment_point: msg.payment_point,
6799 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6800 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6803 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6804 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6807 // Check sanity of message fields:
6808 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6809 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6811 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6812 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6814 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6815 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6817 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6818 if msg.push_msat > full_channel_value_msat {
6819 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6821 if msg.dust_limit_satoshis > msg.funding_satoshis {
6822 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6824 if msg.htlc_minimum_msat >= full_channel_value_msat {
6825 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6827 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6829 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6830 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6831 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6833 if msg.max_accepted_htlcs < 1 {
6834 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6836 if msg.max_accepted_htlcs > MAX_HTLCS {
6837 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6840 // Now check against optional parameters as set by config...
6841 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6842 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6844 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6845 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6847 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6848 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6850 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6851 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6853 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6854 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6856 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6857 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6859 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6860 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6863 // Convert things into internal flags and prep our state:
6865 if config.channel_handshake_limits.force_announced_channel_preference {
6866 if config.channel_handshake_config.announced_channel != announced_channel {
6867 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6871 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6872 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6873 // Protocol-level safety check; this should never happen because of
6874 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6875 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6877 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6878 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6880 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6881 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6882 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6884 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6885 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6888 // check if the funder's amount for the initial commitment tx is sufficient
6889 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6890 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6891 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6895 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6896 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6897 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6898 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6901 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6902 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6903 // want to push much to us), our counterparty should always have more than our reserve.
6904 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6905 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6908 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6909 match &msg.shutdown_scriptpubkey {
6910 &Some(ref script) => {
6911 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6912 if script.len() == 0 {
6915 if !script::is_bolt2_compliant(&script, their_features) {
6916 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6918 Some(script.clone())
6921 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
6923 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6928 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6929 match signer_provider.get_shutdown_scriptpubkey() {
6930 Ok(scriptpubkey) => Some(scriptpubkey),
6931 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6935 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6936 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6937 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6941 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6942 Ok(script) => script,
6943 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6946 let mut secp_ctx = Secp256k1::new();
6947 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6949 let minimum_depth = if is_0conf {
6952 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6956 context: ChannelContext {
6959 config: LegacyChannelConfig {
6960 options: config.channel_config.clone(),
6962 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6967 inbound_handshake_limits_override: None,
6969 temporary_channel_id: Some(msg.temporary_channel_id),
6970 channel_id: msg.temporary_channel_id,
6971 channel_state: ChannelState::NegotiatingFunding(
6972 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6974 announcement_sigs_state: AnnouncementSigsState::NotSent,
6977 latest_monitor_update_id: 0,
6979 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6980 shutdown_scriptpubkey,
6983 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6984 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6985 value_to_self_msat: msg.push_msat,
6987 pending_inbound_htlcs: Vec::new(),
6988 pending_outbound_htlcs: Vec::new(),
6989 holding_cell_htlc_updates: Vec::new(),
6990 pending_update_fee: None,
6991 holding_cell_update_fee: None,
6992 next_holder_htlc_id: 0,
6993 next_counterparty_htlc_id: 0,
6994 update_time_counter: 1,
6996 resend_order: RAACommitmentOrder::CommitmentFirst,
6998 monitor_pending_channel_ready: false,
6999 monitor_pending_revoke_and_ack: false,
7000 monitor_pending_commitment_signed: false,
7001 monitor_pending_forwards: Vec::new(),
7002 monitor_pending_failures: Vec::new(),
7003 monitor_pending_finalized_fulfills: Vec::new(),
7005 signer_pending_commitment_update: false,
7006 signer_pending_funding: false,
7008 #[cfg(debug_assertions)]
7009 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7010 #[cfg(debug_assertions)]
7011 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7013 last_sent_closing_fee: None,
7014 pending_counterparty_closing_signed: None,
7015 expecting_peer_commitment_signed: false,
7016 closing_fee_limits: None,
7017 target_closing_feerate_sats_per_kw: None,
7019 funding_tx_confirmed_in: None,
7020 funding_tx_confirmation_height: 0,
7021 short_channel_id: None,
7022 channel_creation_height: current_chain_height,
7024 feerate_per_kw: msg.feerate_per_kw,
7025 channel_value_satoshis: msg.funding_satoshis,
7026 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7027 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7028 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7029 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7030 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7031 holder_selected_channel_reserve_satoshis,
7032 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7033 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7034 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7035 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7038 counterparty_forwarding_info: None,
7040 channel_transaction_parameters: ChannelTransactionParameters {
7041 holder_pubkeys: pubkeys,
7042 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7043 is_outbound_from_holder: false,
7044 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7045 selected_contest_delay: msg.to_self_delay,
7046 pubkeys: counterparty_pubkeys,
7048 funding_outpoint: None,
7049 channel_type_features: channel_type.clone()
7051 funding_transaction: None,
7052 is_batch_funding: None,
7054 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7055 counterparty_prev_commitment_point: None,
7056 counterparty_node_id,
7058 counterparty_shutdown_scriptpubkey,
7060 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7062 channel_update_status: ChannelUpdateStatus::Enabled,
7063 closing_signed_in_flight: false,
7065 announcement_sigs: None,
7067 #[cfg(any(test, fuzzing))]
7068 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7069 #[cfg(any(test, fuzzing))]
7070 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7072 workaround_lnd_bug_4006: None,
7073 sent_message_awaiting_response: None,
7075 latest_inbound_scid_alias: None,
7076 outbound_scid_alias: 0,
7078 channel_pending_event_emitted: false,
7079 channel_ready_event_emitted: false,
7081 #[cfg(any(test, fuzzing))]
7082 historical_inbound_htlc_fulfills: HashSet::new(),
7087 blocked_monitor_updates: Vec::new(),
7089 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7095 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7096 /// should be sent back to the counterparty node.
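///
/// A rough internal usage sketch (the surrounding variables are assumptions for
/// illustration, not part of this API):
/// ```ignore
/// let accept_channel_msg = inbound_chan.accept_inbound_channel();
/// // Queue `accept_channel_msg` to be sent back to the counterparty node.
/// ```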
7098 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7099 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7100 if self.context.is_outbound() {
7101 panic!("Tried to send accept_channel for an outbound channel?");
7104 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7105 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7107 panic!("Tried to send accept_channel after channel had moved forward");
7109 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7110 panic!("Tried to send an accept_channel for a channel that has already advanced");
7113 self.generate_accept_channel_message()
7116 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7117 /// inbound channel. If the intention is to accept an inbound channel, use
7118 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7120 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7121 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7122 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7123 let keys = self.context.get_holder_pubkeys();
7125 msgs::AcceptChannel {
7126 temporary_channel_id: self.context.channel_id,
7127 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7128 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7129 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7130 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7131 minimum_depth: self.context.minimum_depth.unwrap(),
7132 to_self_delay: self.context.get_holder_selected_contest_delay(),
7133 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7134 funding_pubkey: keys.funding_pubkey,
7135 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7136 payment_point: keys.payment_point,
7137 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7138 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7139 first_per_commitment_point,
7140 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7141 Some(script) => script.clone().into_inner(),
7142 None => Builder::new().into_script(),
7144 channel_type: Some(self.context.channel_type.clone()),
7146 next_local_nonce: None,
7150 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7151 /// inbound channel without accepting it.
7153 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7155 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7156 self.generate_accept_channel_message()
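// Verifies the counterparty's `funding_created` signature against our initial holder
// commitment transaction, returning the rebuilt `CommitmentTransaction` on success so the
// caller can wrap it in a `HolderCommitmentTransaction`.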
7159 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7160 let funding_script = self.context.get_funding_redeemscript();
7162 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7163 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7164 let trusted_tx = initial_commitment_tx.trust();
7165 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7166 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7167 // They sign the holder commitment transaction...
7168 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7169 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7170 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7171 encode::serialize_hex(&funding_script), &self.context.channel_id());
7172 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7174 Ok(initial_commitment_tx)
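// Handles the counterparty's `funding_created` message: records the funding outpoint,
// checks their signature on our initial commitment transaction, builds the initial
// `ChannelMonitor`, advances both commitment numbers, and promotes this `InboundV1Channel`
// into a full `Channel`, returning an optional `funding_signed` to send back (it may be
// absent while the signer is still pending).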
7177 pub fn funding_created<L: Deref>(
7178 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7179 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7183 if self.context.is_outbound() {
7184 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7187 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7188 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7190 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7191 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7193 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7195 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7196 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7197 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7198 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7201 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7202 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7203 // This is an externally observable change before we finish all our checks. In particular
7204 // check_funding_created_signature may fail.
7205 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7207 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7209 Err(ChannelError::Close(e)) => {
7210 self.context.channel_transaction_parameters.funding_outpoint = None;
7211 return Err((self, ChannelError::Close(e)));
7214 // The only error we know how to handle is ChannelError::Close, so we fall over here
7215 // to make sure we don't continue with an inconsistent state.
7216 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7220 let holder_commitment_tx = HolderCommitmentTransaction::new(
7221 initial_commitment_tx,
7224 &self.context.get_holder_pubkeys().funding_pubkey,
7225 self.context.counterparty_funding_pubkey()
7228 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7229 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7232 // Now that we're past error-generating stuff, update our local state:
7234 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7235 self.context.channel_id = funding_txo.to_channel_id();
7236 self.context.cur_counterparty_commitment_transaction_number -= 1;
7237 self.context.cur_holder_commitment_transaction_number -= 1;
7239 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7241 let funding_redeemscript = self.context.get_funding_redeemscript();
7242 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7243 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7244 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7245 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7246 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7247 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7248 shutdown_script, self.context.get_holder_selected_contest_delay(),
7249 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7250 &self.context.channel_transaction_parameters,
7251 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7253 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7254 channel_monitor.provide_initial_counterparty_commitment_tx(
7255 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7256 self.context.cur_counterparty_commitment_transaction_number + 1,
7257 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7258 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7259 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7261 log_info!(logger, "{} funding_signed for peer for channel {}",
7262 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7264 // Promote the channel to a full-fledged one now that we have updated the state and have a
7265 // `ChannelMonitor`.
7266 let mut channel = Channel {
7267 context: self.context,
7269 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7270 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7272 Ok((channel, funding_signed, channel_monitor))
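// Channel (de)serialization below is versioned: `write_ver_prefix!` emits a
// (version, minimum-compatible-version) byte pair which `read_ver_prefix!` checks on read.
// Bumping `MIN_SERIALIZATION_VERSION` makes readers with a lower `SERIALIZATION_VERSION`
// refuse to deserialize the channel.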
7276 const SERIALIZATION_VERSION: u8 = 3;
7277 const MIN_SERIALIZATION_VERSION: u8 = 3;
7279 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7285 impl Writeable for ChannelUpdateStatus {
7286 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7287 // We only care about writing out the current state as it was announced, i.e. only either
7288 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7289 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7291 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7292 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7293 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7294 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7300 impl Readable for ChannelUpdateStatus {
7301 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7302 Ok(match <u8 as Readable>::read(reader)? {
7303 0 => ChannelUpdateStatus::Enabled,
7304 1 => ChannelUpdateStatus::Disabled,
7305 _ => return Err(DecodeError::InvalidValue),
7310 impl Writeable for AnnouncementSigsState {
7311 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7312 // We only care about writing out the current state as if we had just disconnected, at
7313 // which point we always set anything but `AnnouncementSigsState::PeerReceived` to `NotSent`.
7315 AnnouncementSigsState::NotSent => 0u8.write(writer),
7316 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7317 AnnouncementSigsState::Committed => 0u8.write(writer),
7318 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7323 impl Readable for AnnouncementSigsState {
7324 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7325 Ok(match <u8 as Readable>::read(reader)? {
7326 0 => AnnouncementSigsState::NotSent,
7327 1 => AnnouncementSigsState::PeerReceived,
7328 _ => return Err(DecodeError::InvalidValue),
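// The `Writeable` impl below first writes the legacy fixed-format fields (kept for readers
// that predate TLV support) and then a `write_tlv_fields!` block. Per LDK's TLV convention,
// even-numbered types must be understood by the reader while odd-numbered types may be
// skipped, which is why newer optional data generally gets an odd type number.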
7333 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7334 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7335 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7338 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7340 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7341 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7342 // the low bytes now and the optional high bytes later.
7343 let user_id_low = self.context.user_id as u64;
7344 user_id_low.write(writer)?;
7346 // Version 1 deserializers expected to read parts of the config object here. Version 2
7347 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7348 // `minimum_depth` we simply write dummy values here.
7349 writer.write_all(&[0; 8])?;
7351 self.context.channel_id.write(writer)?;
7353 let mut channel_state = self.context.channel_state;
7354 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7355 channel_state.set_peer_disconnected();
7357 channel_state.to_u32().write(writer)?;
7359 self.context.channel_value_satoshis.write(writer)?;
7361 self.context.latest_monitor_update_id.write(writer)?;
7363 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7364 // deserialized from that format.
7365 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7366 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7367 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7369 self.context.destination_script.write(writer)?;
7371 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7372 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7373 self.context.value_to_self_msat.write(writer)?;
7375 let mut dropped_inbound_htlcs = 0;
7376 for htlc in self.context.pending_inbound_htlcs.iter() {
7377 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7378 dropped_inbound_htlcs += 1;
7381 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7382 for htlc in self.context.pending_inbound_htlcs.iter() {
7383 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7386 htlc.htlc_id.write(writer)?;
7387 htlc.amount_msat.write(writer)?;
7388 htlc.cltv_expiry.write(writer)?;
7389 htlc.payment_hash.write(writer)?;
7391 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7392 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7394 htlc_state.write(writer)?;
7396 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7398 htlc_state.write(writer)?;
7400 &InboundHTLCState::Committed => {
7403 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7405 removal_reason.write(writer)?;
7410 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7411 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7412 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7414 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7415 for htlc in self.context.pending_outbound_htlcs.iter() {
7416 htlc.htlc_id.write(writer)?;
7417 htlc.amount_msat.write(writer)?;
7418 htlc.cltv_expiry.write(writer)?;
7419 htlc.payment_hash.write(writer)?;
7420 htlc.source.write(writer)?;
7422 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7424 onion_packet.write(writer)?;
7426 &OutboundHTLCState::Committed => {
7429 &OutboundHTLCState::RemoteRemoved(_) => {
7430 // Treat this as a Committed because we haven't received the CS - they'll
7431 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7434 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7436 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7437 preimages.push(preimage);
7439 let reason: Option<&HTLCFailReason> = outcome.into();
7440 reason.write(writer)?;
7442 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7444 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7445 preimages.push(preimage);
7447 let reason: Option<&HTLCFailReason> = outcome.into();
7448 reason.write(writer)?;
7451 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7452 pending_outbound_blinding_points.push(htlc.blinding_point);
7455 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7456 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7457 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7458 for update in self.context.holding_cell_htlc_updates.iter() {
7460 &HTLCUpdateAwaitingACK::AddHTLC {
7461 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7462 blinding_point, skimmed_fee_msat,
7465 amount_msat.write(writer)?;
7466 cltv_expiry.write(writer)?;
7467 payment_hash.write(writer)?;
7468 source.write(writer)?;
7469 onion_routing_packet.write(writer)?;
7471 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7472 holding_cell_blinding_points.push(blinding_point);
7474 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7476 payment_preimage.write(writer)?;
7477 htlc_id.write(writer)?;
7479 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7481 htlc_id.write(writer)?;
7482 err_packet.write(writer)?;
7487 match self.context.resend_order {
7488 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7489 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7492 self.context.monitor_pending_channel_ready.write(writer)?;
7493 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7494 self.context.monitor_pending_commitment_signed.write(writer)?;
7496 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7497 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7498 pending_forward.write(writer)?;
7499 htlc_id.write(writer)?;
7502 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7503 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7504 htlc_source.write(writer)?;
7505 payment_hash.write(writer)?;
7506 fail_reason.write(writer)?;
7509 if self.context.is_outbound() {
7510 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7511 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7512 Some(feerate).write(writer)?;
7514 // As for inbound HTLCs, if the update was only announced and never committed in a
7515 // commitment_signed, drop it.
7516 None::<u32>.write(writer)?;
7518 self.context.holding_cell_update_fee.write(writer)?;
7520 self.context.next_holder_htlc_id.write(writer)?;
7521 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7522 self.context.update_time_counter.write(writer)?;
7523 self.context.feerate_per_kw.write(writer)?;
7525 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7526 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7527 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7528 // consider the stale state on reload.
7531 self.context.funding_tx_confirmed_in.write(writer)?;
7532 self.context.funding_tx_confirmation_height.write(writer)?;
7533 self.context.short_channel_id.write(writer)?;
7535 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7536 self.context.holder_dust_limit_satoshis.write(writer)?;
7537 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7539 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7540 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7542 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7543 self.context.holder_htlc_minimum_msat.write(writer)?;
7544 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7546 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7547 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7549 match &self.context.counterparty_forwarding_info {
7552 info.fee_base_msat.write(writer)?;
7553 info.fee_proportional_millionths.write(writer)?;
7554 info.cltv_expiry_delta.write(writer)?;
7556 None => 0u8.write(writer)?
7559 self.context.channel_transaction_parameters.write(writer)?;
7560 self.context.funding_transaction.write(writer)?;
7562 self.context.counterparty_cur_commitment_point.write(writer)?;
7563 self.context.counterparty_prev_commitment_point.write(writer)?;
7564 self.context.counterparty_node_id.write(writer)?;
7566 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7568 self.context.commitment_secrets.write(writer)?;
7570 self.context.channel_update_status.write(writer)?;
7572 #[cfg(any(test, fuzzing))]
7573 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7574 #[cfg(any(test, fuzzing))]
7575 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7576 htlc.write(writer)?;
7579 // If the channel type is something other than only-static-remote-key, then we need to have
7580 // older clients fail to deserialize this channel at all. If the type is
7581 // only-static-remote-key, we simply consider it "default" and don't write the channel type out at all.
7583 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7584 Some(&self.context.channel_type) } else { None };
7586 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7587 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7588 // a different percentage of the channel value than 10%, which older versions of LDK used
7589 // to set it to before the percentage was made configurable.
7590 let serialized_holder_selected_reserve =
7591 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7592 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7594 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7595 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7596 let serialized_holder_htlc_max_in_flight =
7597 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7598 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7600 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7601 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7603 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7604 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7605 // we write the high bytes as an option here.
7606 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7608 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7610 write_tlv_fields!(writer, {
7611 (0, self.context.announcement_sigs, option),
7612 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7613 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7614 // them twice, once with their original default values above, and once as an option
7615 // here. On the read side, old versions will simply ignore the odd-type entries here,
7616 // and new versions map the default values to None and allow the TLV entries here to override them.
7618 (1, self.context.minimum_depth, option),
7619 (2, chan_type, option),
7620 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7621 (4, serialized_holder_selected_reserve, option),
7622 (5, self.context.config, required),
7623 (6, serialized_holder_htlc_max_in_flight, option),
7624 (7, self.context.shutdown_scriptpubkey, option),
7625 (8, self.context.blocked_monitor_updates, optional_vec),
7626 (9, self.context.target_closing_feerate_sats_per_kw, option),
7627 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7628 (13, self.context.channel_creation_height, required),
7629 (15, preimages, required_vec),
7630 (17, self.context.announcement_sigs_state, required),
7631 (19, self.context.latest_inbound_scid_alias, option),
7632 (21, self.context.outbound_scid_alias, required),
7633 (23, channel_ready_event_emitted, option),
7634 (25, user_id_high_opt, option),
7635 (27, self.context.channel_keys_id, required),
7636 (28, holder_max_accepted_htlcs, option),
7637 (29, self.context.temporary_channel_id, option),
7638 (31, channel_pending_event_emitted, option),
7639 (35, pending_outbound_skimmed_fees, optional_vec),
7640 (37, holding_cell_skimmed_fees, optional_vec),
7641 (38, self.context.is_batch_funding, option),
7642 (39, pending_outbound_blinding_points, optional_vec),
7643 (41, holding_cell_blinding_points, optional_vec),
7650 const MAX_ALLOC_SIZE: usize = 64*1024;
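// Deserialization mirrors the `Writeable` impl above: the legacy fixed-format fields are
// read first, then `read_tlv_fields!`. If a `channel_keys_id` TLV is present the signer is
// re-derived via the `SignerProvider`; otherwise the legacy serialized signer bytes are
// handed to `read_chan_signer`.
//
// A minimal round-trip sketch (hedged; the surrounding bindings are assumed to exist in the
// caller's scope and are named after the destructured `args` below):
//
//     let encoded = channel.encode(); // `Writeable::encode` collects the bytes written above
//     let channel: Channel<SP> = ReadableArgs::read(
//         &mut &encoded[..],
//         (&entropy_source, &signer_provider, serialized_height, &our_supported_features),
//     )?;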
7651 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7653 ES::Target: EntropySource,
7654 SP::Target: SignerProvider
7656 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7657 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7658 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7660 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7661 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7662 // the low bytes now and the high bytes later.
7663 let user_id_low: u64 = Readable::read(reader)?;
7665 let mut config = Some(LegacyChannelConfig::default());
7667 // Read the old serialization of the ChannelConfig from version 0.0.98.
7668 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7669 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7670 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7671 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7673 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7674 let mut _val: u64 = Readable::read(reader)?;
7677 let channel_id = Readable::read(reader)?;
7678 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7679 let channel_value_satoshis = Readable::read(reader)?;
7681 let latest_monitor_update_id = Readable::read(reader)?;
7683 let mut keys_data = None;
7685 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7686 // the `channel_keys_id` TLV is present below.
7687 let keys_len: u32 = Readable::read(reader)?;
7688 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7689 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7690 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7691 let mut data = [0; 1024];
7692 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7693 reader.read_exact(read_slice)?;
7694 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7698 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7699 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7700 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7703 let destination_script = Readable::read(reader)?;
7705 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7706 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7707 let value_to_self_msat = Readable::read(reader)?;
7709 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7711 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7712 for _ in 0..pending_inbound_htlc_count {
7713 pending_inbound_htlcs.push(InboundHTLCOutput {
7714 htlc_id: Readable::read(reader)?,
7715 amount_msat: Readable::read(reader)?,
7716 cltv_expiry: Readable::read(reader)?,
7717 payment_hash: Readable::read(reader)?,
7718 state: match <u8 as Readable>::read(reader)? {
7719 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7720 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7721 3 => InboundHTLCState::Committed,
7722 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7723 _ => return Err(DecodeError::InvalidValue),
7728 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7729 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7730 for _ in 0..pending_outbound_htlc_count {
7731 pending_outbound_htlcs.push(OutboundHTLCOutput {
7732 htlc_id: Readable::read(reader)?,
7733 amount_msat: Readable::read(reader)?,
7734 cltv_expiry: Readable::read(reader)?,
7735 payment_hash: Readable::read(reader)?,
7736 source: Readable::read(reader)?,
7737 state: match <u8 as Readable>::read(reader)? {
7738 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7739 1 => OutboundHTLCState::Committed,
7741 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7742 OutboundHTLCState::RemoteRemoved(option.into())
7745 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7746 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7749 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7750 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7752 _ => return Err(DecodeError::InvalidValue),
7754 skimmed_fee_msat: None,
7755 blinding_point: None,
7759 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7760 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7761 for _ in 0..holding_cell_htlc_update_count {
7762 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7763 0 => HTLCUpdateAwaitingACK::AddHTLC {
7764 amount_msat: Readable::read(reader)?,
7765 cltv_expiry: Readable::read(reader)?,
7766 payment_hash: Readable::read(reader)?,
7767 source: Readable::read(reader)?,
7768 onion_routing_packet: Readable::read(reader)?,
7769 skimmed_fee_msat: None,
7770 blinding_point: None,
7772 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7773 payment_preimage: Readable::read(reader)?,
7774 htlc_id: Readable::read(reader)?,
7776 2 => HTLCUpdateAwaitingACK::FailHTLC {
7777 htlc_id: Readable::read(reader)?,
7778 err_packet: Readable::read(reader)?,
7780 _ => return Err(DecodeError::InvalidValue),
7784 let resend_order = match <u8 as Readable>::read(reader)? {
7785 0 => RAACommitmentOrder::CommitmentFirst,
7786 1 => RAACommitmentOrder::RevokeAndACKFirst,
7787 _ => return Err(DecodeError::InvalidValue),
7790 let monitor_pending_channel_ready = Readable::read(reader)?;
7791 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7792 let monitor_pending_commitment_signed = Readable::read(reader)?;
7794 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7795 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7796 for _ in 0..monitor_pending_forwards_count {
7797 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7800 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7801 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7802 for _ in 0..monitor_pending_failures_count {
7803 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7806 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7808 let holding_cell_update_fee = Readable::read(reader)?;
7810 let next_holder_htlc_id = Readable::read(reader)?;
7811 let next_counterparty_htlc_id = Readable::read(reader)?;
7812 let update_time_counter = Readable::read(reader)?;
7813 let feerate_per_kw = Readable::read(reader)?;
7815 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7816 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7817 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7818 // consider the stale state on reload.
7819 match <u8 as Readable>::read(reader)? {
7822 let _: u32 = Readable::read(reader)?;
7823 let _: u64 = Readable::read(reader)?;
7824 let _: Signature = Readable::read(reader)?;
7826 _ => return Err(DecodeError::InvalidValue),
7829 let funding_tx_confirmed_in = Readable::read(reader)?;
7830 let funding_tx_confirmation_height = Readable::read(reader)?;
7831 let short_channel_id = Readable::read(reader)?;
7833 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7834 let holder_dust_limit_satoshis = Readable::read(reader)?;
7835 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7836 let mut counterparty_selected_channel_reserve_satoshis = None;
7838 // Read the old serialization from version 0.0.98.
7839 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7841 // Read the 8 bytes of backwards-compatibility data.
7842 let _dummy: u64 = Readable::read(reader)?;
7844 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7845 let holder_htlc_minimum_msat = Readable::read(reader)?;
7846 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7848 let mut minimum_depth = None;
7850 // Read the old serialization from version 0.0.98.
7851 minimum_depth = Some(Readable::read(reader)?);
7853 // Read the 4 bytes of backwards-compatibility data.
7854 let _dummy: u32 = Readable::read(reader)?;
7857 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7859 1 => Some(CounterpartyForwardingInfo {
7860 fee_base_msat: Readable::read(reader)?,
7861 fee_proportional_millionths: Readable::read(reader)?,
7862 cltv_expiry_delta: Readable::read(reader)?,
7864 _ => return Err(DecodeError::InvalidValue),
7867 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7868 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7870 let counterparty_cur_commitment_point = Readable::read(reader)?;
7872 let counterparty_prev_commitment_point = Readable::read(reader)?;
7873 let counterparty_node_id = Readable::read(reader)?;
7875 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7876 let commitment_secrets = Readable::read(reader)?;
7878 let channel_update_status = Readable::read(reader)?;
7880 #[cfg(any(test, fuzzing))]
7881 let mut historical_inbound_htlc_fulfills = HashSet::new();
7882 #[cfg(any(test, fuzzing))]
7884 let htlc_fulfills_len: u64 = Readable::read(reader)?;
7885 for _ in 0..htlc_fulfills_len {
7886 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7890 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7891 Some((feerate, if channel_parameters.is_outbound_from_holder {
7892 FeeUpdateState::Outbound
7894 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7900 let mut announcement_sigs = None;
7901 let mut target_closing_feerate_sats_per_kw = None;
7902 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7903 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7904 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7905 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7906 // only, so we default to that if none was written.
7907 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7908 let mut channel_creation_height = Some(serialized_height);
7909 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7911 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7912 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7913 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7914 let mut latest_inbound_scid_alias = None;
7915 let mut outbound_scid_alias = None;
7916 let mut channel_pending_event_emitted = None;
7917 let mut channel_ready_event_emitted = None;
7919 let mut user_id_high_opt: Option<u64> = None;
7920 let mut channel_keys_id: Option<[u8; 32]> = None;
7921 let mut temporary_channel_id: Option<ChannelId> = None;
7922 let mut holder_max_accepted_htlcs: Option<u16> = None;
7924 let mut blocked_monitor_updates = Some(Vec::new());
7926 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7927 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7929 let mut is_batch_funding: Option<()> = None;
7931 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7932 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7934 read_tlv_fields!(reader, {
7935 (0, announcement_sigs, option),
7936 (1, minimum_depth, option),
7937 (2, channel_type, option),
7938 (3, counterparty_selected_channel_reserve_satoshis, option),
7939 (4, holder_selected_channel_reserve_satoshis, option),
7940 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7941 (6, holder_max_htlc_value_in_flight_msat, option),
7942 (7, shutdown_scriptpubkey, option),
7943 (8, blocked_monitor_updates, optional_vec),
7944 (9, target_closing_feerate_sats_per_kw, option),
7945 (11, monitor_pending_finalized_fulfills, optional_vec),
7946 (13, channel_creation_height, option),
7947 (15, preimages_opt, optional_vec),
7948 (17, announcement_sigs_state, option),
7949 (19, latest_inbound_scid_alias, option),
7950 (21, outbound_scid_alias, option),
7951 (23, channel_ready_event_emitted, option),
7952 (25, user_id_high_opt, option),
7953 (27, channel_keys_id, option),
7954 (28, holder_max_accepted_htlcs, option),
7955 (29, temporary_channel_id, option),
7956 (31, channel_pending_event_emitted, option),
7957 (35, pending_outbound_skimmed_fees_opt, optional_vec),
7958 (37, holding_cell_skimmed_fees_opt, optional_vec),
7959 (38, is_batch_funding, option),
7960 (39, pending_outbound_blinding_points_opt, optional_vec),
7961 (41, holding_cell_blinding_points_opt, optional_vec),
7964 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7965 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7966 // If we've gotten to the funding stage of the channel, populate the signer with its
7967 // required channel parameters.
7968 if channel_state >= ChannelState::FundingNegotiated {
7969 holder_signer.provide_channel_parameters(&channel_parameters);
7971 (channel_keys_id, holder_signer)
7973 // `keys_data` can be `None` if we had corrupted data.
7974 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7975 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7976 (holder_signer.channel_keys_id(), holder_signer)
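// The `preimages` TLV (type 15) stores outbound-HTLC success preimages in the order the
// HTLCs were written above. Zip them back onto the HTLCs whose outcome was serialized as
// `Success(None)`; any leftover entry indicates inconsistent data.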
7979 if let Some(preimages) = preimages_opt {
7980 let mut iter = preimages.into_iter();
7981 for htlc in pending_outbound_htlcs.iter_mut() {
7983 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7984 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7986 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7987 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7992 // We expect all preimages to be consumed above
7993 if iter.next().is_some() {
7994 return Err(DecodeError::InvalidValue);
7998 let chan_features = channel_type.as_ref().unwrap();
7999 if !chan_features.is_subset(our_supported_features) {
8000 // If the channel was written by a new version and negotiated with features we don't
8001 // understand yet, refuse to read it.
8002 return Err(DecodeError::UnknownRequiredFeature);
8005 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8006 // To account for that, we're proactively setting/overriding the field here.
8007 channel_parameters.channel_type_features = chan_features.clone();
8009 let mut secp_ctx = Secp256k1::new();
8010 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8012 // `user_id` used to be a single u64 value. In order to remain backwards
8013 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8014 // separate u64 values.
8015 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8017 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
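// Per-HTLC optional data (skimmed fees and blinding points) is serialized as parallel
// `Vec<Option<_>>` TLVs; reapply each vector positionally to the pending-outbound and
// holding-cell HTLC lists, treating any length mismatch as `DecodeError::InvalidValue`.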
8019 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8020 let mut iter = skimmed_fees.into_iter();
8021 for htlc in pending_outbound_htlcs.iter_mut() {
8022 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8024 // We expect all skimmed fees to be consumed above
8025 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8027 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8028 let mut iter = skimmed_fees.into_iter();
8029 for htlc in holding_cell_htlc_updates.iter_mut() {
8030 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8031 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8034 // We expect all skimmed fees to be consumed above
8035 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8037 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8038 let mut iter = blinding_pts.into_iter();
8039 for htlc in pending_outbound_htlcs.iter_mut() {
8040 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8042 // We expect all blinding points to be consumed above
8043 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8045 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8046 let mut iter = blinding_pts.into_iter();
8047 for htlc in holding_cell_htlc_updates.iter_mut() {
8048 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8049 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8052 // We expect all blinding points to be consumed above
8053 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8057 context: ChannelContext {
8060 config: config.unwrap(),
8064 // Note that we don't care about serializing handshake limits as we only ever serialize
8065 // channel data after the handshake has completed.
8066 inbound_handshake_limits_override: None,
8069 temporary_channel_id,
8071 announcement_sigs_state: announcement_sigs_state.unwrap(),
8073 channel_value_satoshis,
8075 latest_monitor_update_id,
8077 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8078 shutdown_scriptpubkey,
8081 cur_holder_commitment_transaction_number,
8082 cur_counterparty_commitment_transaction_number,
8085 holder_max_accepted_htlcs,
8086 pending_inbound_htlcs,
8087 pending_outbound_htlcs,
8088 holding_cell_htlc_updates,
8092 monitor_pending_channel_ready,
8093 monitor_pending_revoke_and_ack,
8094 monitor_pending_commitment_signed,
8095 monitor_pending_forwards,
8096 monitor_pending_failures,
8097 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8099 signer_pending_commitment_update: false,
8100 signer_pending_funding: false,
8103 holding_cell_update_fee,
8104 next_holder_htlc_id,
8105 next_counterparty_htlc_id,
8106 update_time_counter,
8109 #[cfg(debug_assertions)]
8110 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8111 #[cfg(debug_assertions)]
8112 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8114 last_sent_closing_fee: None,
8115 pending_counterparty_closing_signed: None,
8116 expecting_peer_commitment_signed: false,
8117 closing_fee_limits: None,
8118 target_closing_feerate_sats_per_kw,
8120 funding_tx_confirmed_in,
8121 funding_tx_confirmation_height,
8123 channel_creation_height: channel_creation_height.unwrap(),
8125 counterparty_dust_limit_satoshis,
8126 holder_dust_limit_satoshis,
8127 counterparty_max_htlc_value_in_flight_msat,
8128 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8129 counterparty_selected_channel_reserve_satoshis,
8130 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8131 counterparty_htlc_minimum_msat,
8132 holder_htlc_minimum_msat,
8133 counterparty_max_accepted_htlcs,
8136 counterparty_forwarding_info,
8138 channel_transaction_parameters: channel_parameters,
8139 funding_transaction,
8142 counterparty_cur_commitment_point,
8143 counterparty_prev_commitment_point,
8144 counterparty_node_id,
8146 counterparty_shutdown_scriptpubkey,
8150 channel_update_status,
8151 closing_signed_in_flight: false,
8155 #[cfg(any(test, fuzzing))]
8156 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8157 #[cfg(any(test, fuzzing))]
8158 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8160 workaround_lnd_bug_4006: None,
8161 sent_message_awaiting_response: None,
8163 latest_inbound_scid_alias,
8164 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8165 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8167 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8168 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8170 #[cfg(any(test, fuzzing))]
8171 historical_inbound_htlc_fulfills,
8173 channel_type: channel_type.unwrap(),
8176 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
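// Unit tests below cover the channel open/accept handshake, dust-limit and commitment-fee
// accounting, and channel reestablishment.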
8185 use bitcoin::blockdata::constants::ChainHash;
8186 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8187 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8188 use bitcoin::blockdata::opcodes;
8189 use bitcoin::network::constants::Network;
8190 use crate::ln::{PaymentHash, PaymentPreimage};
8191 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8192 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8193 use crate::ln::channel::InitFeatures;
8194 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8195 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8196 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8197 use crate::ln::msgs;
8198 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8199 use crate::ln::script::ShutdownScript;
8200 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8201 use crate::chain::BestBlock;
8202 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8203 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8204 use crate::chain::transaction::OutPoint;
8205 use crate::routing::router::{Path, RouteHop};
8206 use crate::util::config::UserConfig;
8207 use crate::util::errors::APIError;
8208 use crate::util::ser::{ReadableArgs, Writeable};
8209 use crate::util::test_utils;
8210 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8211 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8212 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8213 use bitcoin::secp256k1::{SecretKey,PublicKey};
8214 use bitcoin::hashes::sha256::Hash as Sha256;
8215 use bitcoin::hashes::Hash;
8216 use bitcoin::hashes::hex::FromHex;
8217 use bitcoin::hash_types::WPubkeyHash;
8218 use bitcoin::blockdata::locktime::absolute::LockTime;
8219 use bitcoin::address::{WitnessProgram, WitnessVersion};
8220 use crate::prelude::*;
8222 struct TestFeeEstimator {
8225 impl FeeEstimator for TestFeeEstimator {
8226 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8232 fn test_max_funding_satoshis_no_wumbo() {
8233 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8234 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8235 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8239 signer: InMemorySigner,
8242 impl EntropySource for Keys {
8243 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8246 impl SignerProvider for Keys {
8247 type EcdsaSigner = InMemorySigner;
8249 type TaprootSigner = InMemorySigner;
8251 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8252 self.signer.channel_keys_id()
8255 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8259 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8261 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8262 let secp_ctx = Secp256k1::signing_only();
8263 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8264 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8265 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8268 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8269 let secp_ctx = Secp256k1::signing_only();
8270 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8271 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8275 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8276 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8277 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
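// Checks that creating an outbound channel fails with `APIError::IncompatibleShutdownScript`
// when the signer hands back an upfront shutdown script the peer can't accept (here a v16
// witness program while the peer's features have `shutdown_anysegwit` cleared).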
8281 fn upfront_shutdown_script_incompatibility() {
8282 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8283 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8284 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8287 let seed = [42; 32];
8288 let network = Network::Testnet;
8289 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8290 keys_provider.expect(OnGetShutdownScriptpubkey {
8291 returns: non_v0_segwit_shutdown_script.clone(),
8294 let secp_ctx = Secp256k1::new();
8295 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8296 let config = UserConfig::default();
8297 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8298 Err(APIError::IncompatibleShutdownScript { script }) => {
8299 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8301 Err(e) => panic!("Unexpected error: {:?}", e),
8302 Ok(_) => panic!("Expected error"),
8306 // Check that, during channel creation, we use the same feerate in the open channel message
8307 // as we do in the Channel object creation itself.
8309 fn test_open_channel_msg_fee() {
8310 let original_fee = 253;
8311 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8312 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8313 let secp_ctx = Secp256k1::new();
8314 let seed = [42; 32];
8315 let network = Network::Testnet;
8316 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8318 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8319 let config = UserConfig::default();
8320 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8322 // Now change the fee so we can check that the fee in the open_channel message is the
8323 // same as the old fee.
8324 fee_est.fee_est = 500;
8325 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8326 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8330 fn test_holder_vs_counterparty_dust_limit() {
8331 // Test that when calculating the local and remote commitment transaction fees, the correct
8332 // dust limits are used.
8333 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8334 let secp_ctx = Secp256k1::new();
8335 let seed = [42; 32];
8336 let network = Network::Testnet;
8337 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8338 let logger = test_utils::TestLogger::new();
8339 let best_block = BestBlock::from_network(network);
8341 // Go through the flow of opening a channel between two nodes, making sure
8342 // they have different dust limits.
8344 // Create Node A's channel pointing to Node B's pubkey
8345 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8346 let config = UserConfig::default();
8347 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8349 // Create Node B's channel by receiving Node A's open_channel message
8350 // Make sure A's dust limit is as we expect.
8351 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8352 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8353 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8355 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8356 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8357 accept_channel_msg.dust_limit_satoshis = 546;
8358 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8359 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8361 // Node A --> Node B: funding created
8362 let output_script = node_a_chan.context.get_funding_redeemscript();
8363 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8364 value: 10000000, script_pubkey: output_script.clone(),
8366 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8367 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8368 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8370 // Node B --> Node A: funding signed
8371 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8373 // Put some inbound and outbound HTLCs in A's channel.
8374 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8375 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8377 amount_msat: htlc_amount_msat,
8378 payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8379 cltv_expiry: 300000000,
8380 state: InboundHTLCState::Committed,
8383 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8385 amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8386 payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8387 cltv_expiry: 200000000,
8388 state: OutboundHTLCState::Committed,
8389 source: HTLCSource::OutboundRoute {
8390 path: Path { hops: Vec::new(), blinded_tail: None },
8391 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8392 first_hop_htlc_msat: 548,
8393 payment_id: PaymentId([42; 32]),
8395 skimmed_fee_msat: None,
8396 blinding_point: None,
8399 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8400 // the dust limit check.
8401 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8402 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8403 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8404 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8406 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8407 // of the HTLCs are seen to be above the dust limit.
8408 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8409 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8410 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8411 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8412 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8416 fn test_timeout_vs_success_htlc_dust_limit() {
8417 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8418 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8419 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8420 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8421 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8422 let secp_ctx = Secp256k1::new();
8423 let seed = [42; 32];
8424 let network = Network::Testnet;
8425 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8427 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8428 let config = UserConfig::default();
8429 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8431 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8432 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8434 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8435 // counted as dust when it shouldn't be.
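// Rough arithmetic, assuming the non-anchor BOLT 3 weights (timeout 663 WU, success 703 WU):
// at 253 sat/kW the second-stage fees are 253 * 663 / 1000 = 167 sat and 253 * 703 / 1000 = 177 sat,
// so this amount sits exactly 1 sat above the timeout-based threshold while remaining below the
// success-based one.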
8436 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8437 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8438 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8439 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8441 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8442 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8443 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8444 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8445 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8447 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8449 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8450 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8451 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8452 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8453 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8455 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8456 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8457 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8458 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8459 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8463 fn channel_reestablish_no_updates() {
8464 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8465 let logger = test_utils::TestLogger::new();
8466 let secp_ctx = Secp256k1::new();
8467 let seed = [42; 32];
8468 let network = Network::Testnet;
8469 let best_block = BestBlock::from_network(network);
8470 let chain_hash = ChainHash::using_genesis_block(network);
8471 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8473 // Go through the flow of opening a channel between two nodes.
8475 // Create Node A's channel pointing to Node B's pubkey
8476 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8477 let config = UserConfig::default();
8478 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8480 // Create Node B's channel by receiving Node A's open_channel message
8481 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8482 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8483 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8485 // Node B --> Node A: accept channel
8486 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8487 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8489 // Node A --> Node B: funding created
8490 let output_script = node_a_chan.context.get_funding_redeemscript();
8491 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8492 value: 10000000, script_pubkey: output_script.clone(),
8494 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8495 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8496 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8498 // Node B --> Node A: funding signed
8499 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8501 // Now disconnect the two nodes and check that the commitment point in
8502 // Node B's channel_reestablish message is sane.
8503 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8504 let msg = node_b_chan.get_channel_reestablish(&&logger);
8505 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8506 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8507 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
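// Immediately after funding, commitment number 0 has been signed but nothing has been revoked,
// so each side expects the next commitment_signed to be for commitment 1, expects the next
// revoke_and_ack to be for commitment 0, and has no per-commitment secret from its peer yet
// (hence the all-zero `your_last_per_commitment_secret`).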
8509 // Check that the commitment point in Node A's channel_reestablish message is sane.
8511 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8512 let msg = node_a_chan.get_channel_reestablish(&&logger);
8513 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8514 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8515 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8519 fn test_configured_holder_max_htlc_value_in_flight() {
8520 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8521 let logger = test_utils::TestLogger::new();
8522 let secp_ctx = Secp256k1::new();
8523 let seed = [42; 32];
8524 let network = Network::Testnet;
8525 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8526 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8527 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8529 let mut config_2_percent = UserConfig::default();
8530 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8531 let mut config_99_percent = UserConfig::default();
8532 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8533 let mut config_0_percent = UserConfig::default();
8534 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8535 let mut config_101_percent = UserConfig::default();
8536 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
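// In all of the cases below the configured percentage is expected to be clamped to the [1, 100]
// range before being applied to the channel value: values below 1 behave as 1% and values above
// 100 as 100%.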
8538 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8539 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8540 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8541 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8542 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8543 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8545 // Test with the upper bound - 1 of valid values (99%).
8546 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8547 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8548 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8550 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8552 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8553 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8554 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8555 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8556 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8557 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8559 // Test with the upper bound - 1 of valid values (99%).
8560 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8561 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8562 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8564 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8565 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8566 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8567 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8568 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8570 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8571 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8573 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8574 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8575 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8577 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8578 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8579 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8580 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8581 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8583 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8584 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value larger than 100.
8586 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8587 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8588 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8592 fn test_configured_holder_selected_channel_reserve_satoshis() {
8594 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8595 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8596 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8598 // Test with valid but unreasonably high channel reserves:
8599 // the opening and accepting parties request 49%/49% and 60%/30% channel reserves respectively.
8600 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8601 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8603 // Test with calculated channel reserve less than lower bound
8604 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8605 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8607 // Test with invalid channel reserves where the sum of both is greater than or equal to the channel value.
8609 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8610 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
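// The helper below drives both sides of the handshake. The expected holder-selected reserve is
// `channel_value * their_channel_reserve_proportional_millionths / 1_000_000`, floored at
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`, and the inbound side is expected to reject the
// `open_channel` when the two reserves together would consume the whole channel value.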
8613 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8614 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8615 let logger = test_utils::TestLogger::new();
8616 let secp_ctx = Secp256k1::new();
8617 let seed = [42; 32];
8618 let network = Network::Testnet;
8619 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8620 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8621 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8624 let mut outbound_node_config = UserConfig::default();
8625 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8626 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8628 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8629 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8631 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8632 let mut inbound_node_config = UserConfig::default();
8633 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8635 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8636 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8638 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8640 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8641 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8643 // Channel negotiation should fail: the combined reserves leave no spendable balance.
8644 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8645 assert!(result.is_err());
8650 fn channel_update() {
8651 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8652 let logger = test_utils::TestLogger::new();
8653 let secp_ctx = Secp256k1::new();
8654 let seed = [42; 32];
8655 let network = Network::Testnet;
8656 let best_block = BestBlock::from_network(network);
8657 let chain_hash = ChainHash::using_genesis_block(network);
8658 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8660 // Create Node A's channel pointing to Node B's pubkey
8661 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8662 let config = UserConfig::default();
8663 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8665 // Create Node B's channel by receiving Node A's open_channel message
8666 // Make sure A's dust limit is as we expect.
8667 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8668 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8669 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8671 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8672 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8673 accept_channel_msg.dust_limit_satoshis = 546;
8674 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8675 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8677 // Node A --> Node B: funding created
8678 let output_script = node_a_chan.context.get_funding_redeemscript();
8679 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8680 value: 10000000, script_pubkey: output_script.clone(),
8682 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8683 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8684 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8686 // Node B --> Node A: funding signed
8687 let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap();
8689 // Make sure that receiving a channel update will update the Channel as expected.
8690 let update = ChannelUpdate {
8691 contents: UnsignedChannelUpdate {
8693 short_channel_id: 0,
8696 cltv_expiry_delta: 100,
8697 htlc_minimum_msat: 5,
8698 htlc_maximum_msat: MAX_VALUE_MSAT,
8700 fee_proportional_millionths: 11,
8701 excess_data: Vec::new(),
8703 signature: Signature::from(unsafe { FFISignature::new() })
8705 assert!(node_a_chan.channel_update(&update).unwrap());
8707 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8708 // change our official htlc_minimum_msat.
8709 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8710 match node_a_chan.context.counterparty_forwarding_info() {
8712 assert_eq!(info.cltv_expiry_delta, 100);
8713 assert_eq!(info.fee_base_msat, 110);
8714 assert_eq!(info.fee_proportional_millionths, 11);
8716 None => panic!("expected counterparty forwarding info to be Some")
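// Re-applying the identical update should report that nothing changed.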
8719 assert!(!node_a_chan.channel_update(&update).unwrap());
8723 fn blinding_point_skimmed_fee_ser() {
8724 // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
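// Both fields are optional per-HTLC values (presumably written as optional TLVs), so the
// round-trip below uses a mix of set and unset values across the pending and holding-cell HTLCs
// to check that each value is re-attached to the same HTLC on read.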
8725 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8726 let secp_ctx = Secp256k1::new();
8727 let seed = [42; 32];
8728 let network = Network::Testnet;
8729 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8731 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8732 let config = UserConfig::default();
8733 let features = channelmanager::provided_init_features(&config);
8734 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8735 let mut chan = Channel { context: outbound_chan.context };
8737 let dummy_htlc_source = HTLCSource::OutboundRoute {
8739 hops: vec![RouteHop {
8740 pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8741 node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8742 cltv_expiry_delta: 0, maybe_announced_channel: false,
8746 session_priv: test_utils::privkey(42),
8747 first_hop_htlc_msat: 0,
8748 payment_id: PaymentId([42; 32]),
8750 let dummy_outbound_output = OutboundHTLCOutput {
8753 payment_hash: PaymentHash([43; 32]),
8755 state: OutboundHTLCState::Committed,
8756 source: dummy_htlc_source.clone(),
8757 skimmed_fee_msat: None,
8758 blinding_point: None,
8760 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8761 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8763 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8766 htlc.skimmed_fee_msat = Some(1);
8769 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8771 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8774 payment_hash: PaymentHash([43; 32]),
8775 source: dummy_htlc_source.clone(),
8776 onion_routing_packet: msgs::OnionPacket {
8778 public_key: Ok(test_utils::pubkey(1)),
8779 hop_data: [0; 20*65],
8782 skimmed_fee_msat: None,
8783 blinding_point: None,
8785 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8786 payment_preimage: PaymentPreimage([42; 32]),
8789 let mut holding_cell_htlc_updates = Vec::with_capacity(10);
8792 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8793 } else if i % 3 == 1 {
8794 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8796 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8797 if let HTLCUpdateAwaitingACK::AddHTLC {
8798 ref mut blinding_point, ref mut skimmed_fee_msat, ..
8799 } = &mut dummy_add {
8800 *blinding_point = Some(test_utils::pubkey(42 + i));
8801 *skimmed_fee_msat = Some(42);
8803 holding_cell_htlc_updates.push(dummy_add);
8806 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8808 // Encode and decode the channel and ensure that the HTLCs within are the same.
8809 let encoded_chan = chan.encode();
8810 let mut s = crate::io::Cursor::new(&encoded_chan);
8811 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8812 let features = channelmanager::provided_channel_type_features(&config);
8813 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8814 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8815 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8818 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8820 fn outbound_commitment_test() {
8821 use bitcoin::sighash;
8822 use bitcoin::consensus::encode::serialize;
8823 use bitcoin::sighash::EcdsaSighashType;
8824 use bitcoin::hashes::hex::FromHex;
8825 use bitcoin::hash_types::Txid;
8826 use bitcoin::secp256k1::Message;
8827 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8828 use crate::ln::PaymentPreimage;
8829 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8830 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8831 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8832 use crate::util::logger::Logger;
8833 use crate::sync::Arc;
8834 use core::str::FromStr;
8835 use hex::DisplayHex;
8837 // Test vectors from BOLT 3 Appendices C and F (anchors):
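// At a high level, each vector below builds the holder commitment with the appendix's fixed
// keys, verifies the counterparty's commitment signature against the funding redeemscript, asks
// the signer for the holder commitment and HTLC signatures, and compares the fully signed
// commitment and HTLC transactions byte-for-byte against the expected hex.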
8838 let feeest = TestFeeEstimator{fee_est: 15000};
8839 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
8840 let secp_ctx = Secp256k1::new();
8842 let mut signer = InMemorySigner::new(
8844 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8845 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8846 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8847 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8848 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8850 // These aren't set in the test vectors:
8851 [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8857 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8858 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8859 let keys_provider = Keys { signer: signer.clone() };
8861 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8862 let mut config = UserConfig::default();
8863 config.channel_handshake_config.announced_channel = false;
8864 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8865 chan.context.holder_dust_limit_satoshis = 546;
8866 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Normally filled in when handling accept_channel
8868 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8870 let counterparty_pubkeys = ChannelPublicKeys {
8871 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8872 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8873 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8874 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
8875 htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
8877 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8878 CounterpartyChannelTransactionParameters {
8879 pubkeys: counterparty_pubkeys.clone(),
8880 selected_contest_delay: 144
8882 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8883 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8885 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8886 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8888 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8889 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8891 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8892 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8894 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8895 // derived from a commitment_seed, so instead we copy it here and call
8896 // build_commitment_transaction.
8897 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8898 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8899 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8900 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8901 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8903 macro_rules! test_commitment {
8904 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8905 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8906 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8910 macro_rules! test_commitment_with_anchors {
8911 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8912 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8913 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8917 macro_rules! test_commitment_common {
8918 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8919 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8921 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8922 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8924 let htlcs = commitment_stats.htlcs_included.drain(..)
8925 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8927 (commitment_stats.tx, htlcs)
8929 let trusted_tx = commitment_tx.trust();
8930 let unsigned_tx = trusted_tx.built_transaction();
8931 let redeemscript = chan.context.get_funding_redeemscript();
8932 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8933 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8934 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8935 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8937 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8938 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8939 let mut counterparty_htlc_sigs = Vec::new();
8940 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8942 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8943 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8944 counterparty_htlc_sigs.push(remote_signature);
8946 assert_eq!(htlcs.len(), per_htlc.len());
8948 let holder_commitment_tx = HolderCommitmentTransaction::new(
8949 commitment_tx.clone(),
8950 counterparty_signature,
8951 counterparty_htlc_sigs,
8952 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8953 chan.context.counterparty_funding_pubkey()
8955 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8956 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8958 let funding_redeemscript = chan.context.get_funding_redeemscript();
8959 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8960 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8962 // ((htlc, counterparty_sig), (index, holder_sig))
8963 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8966 log_trace!(logger, "verifying htlc {}", $htlc_idx);
8967 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8969 let ref htlc = htlcs[$htlc_idx];
8970 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8971 chan.context.get_counterparty_selected_contest_delay().unwrap(),
8972 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
8973 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
8974 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
8975 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
8976 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
8978 let mut preimage: Option<PaymentPreimage> = None;
8981 let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
8982 if out == htlc.payment_hash {
8983 preimage = Some(PaymentPreimage([i; 32]));
8987 assert!(preimage.is_some());
8990 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
8991 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
8992 channel_derivation_parameters: ChannelDerivationParameters {
8993 value_satoshis: chan.context.channel_value_satoshis,
8994 keys_id: chan.context.channel_keys_id,
8995 transaction_parameters: chan.context.channel_transaction_parameters.clone(),
8997 commitment_txid: trusted_tx.txid(),
8998 per_commitment_number: trusted_tx.commitment_number(),
8999 per_commitment_point: trusted_tx.per_commitment_point(),
9000 feerate_per_kw: trusted_tx.feerate_per_kw(),
9002 preimage: preimage.clone(),
9003 counterparty_sig: *htlc_counterparty_sig,
9004 }, &secp_ctx).unwrap();
9005 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9006 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9008 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9009 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9010 let trusted_tx = holder_commitment_tx.trust();
9011 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9012 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9013 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9015 assert!(htlc_counterparty_sig_iter.next().is_none());
9019 // anchors: simple commitment tx with no HTLCs and single anchor
9020 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9021 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9022 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9024 // simple commitment tx with no HTLCs
9025 chan.context.value_to_self_msat = 7000000000;
9027 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9028 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9029 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9031 // anchors: simple commitment tx with no HTLCs
9032 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9033 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9034 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9036 chan.context.pending_inbound_htlcs.push({
9037 let mut out = InboundHTLCOutput{
9039 amount_msat: 1000000,
9041 payment_hash: PaymentHash([0; 32]),
9042 state: InboundHTLCState::Committed,
9044 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9047 chan.context.pending_inbound_htlcs.push({
9048 let mut out = InboundHTLCOutput{
9050 amount_msat: 2000000,
9052 payment_hash: PaymentHash([0; 32]),
9053 state: InboundHTLCState::Committed,
9055 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9058 chan.context.pending_outbound_htlcs.push({
9059 let mut out = OutboundHTLCOutput{
9061 amount_msat: 2000000,
9063 payment_hash: PaymentHash([0; 32]),
9064 state: OutboundHTLCState::Committed,
9065 source: HTLCSource::dummy(),
9066 skimmed_fee_msat: None,
9067 blinding_point: None,
9069 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9072 chan.context.pending_outbound_htlcs.push({
9073 let mut out = OutboundHTLCOutput{
9075 amount_msat: 3000000,
9077 payment_hash: PaymentHash([0; 32]),
9078 state: OutboundHTLCState::Committed,
9079 source: HTLCSource::dummy(),
9080 skimmed_fee_msat: None,
9081 blinding_point: None,
9083 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9086 chan.context.pending_inbound_htlcs.push({
9087 let mut out = InboundHTLCOutput{
9089 amount_msat: 4000000,
9091 payment_hash: PaymentHash([0; 32]),
9092 state: InboundHTLCState::Committed,
9094 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9098 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9099 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9100 chan.context.feerate_per_kw = 0;
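// At a feerate of 0 there is no second-stage fee to add on top of the 546 sat dust limit, so all
// five pending HTLCs (1_000 sat to 4_000 sat) stay above the dust threshold and show up as
// outputs, giving a seven-output commitment (5 HTLCs + to_local + to_remote).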
9102 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9103 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9104 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9107 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9108 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9109 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9112 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9113 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9114 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9117 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9118 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9119 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9122 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9123 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9124 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9127 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9128 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9129 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9132 // commitment tx with seven outputs untrimmed (maximum feerate)
9133 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9134 chan.context.feerate_per_kw = 647;
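// 647 sat/kW is the highest feerate at which the smallest HTLC stays untrimmed: assuming the
// non-anchor HTLC-success weight of 703 WU, its dust threshold is 546 + 647 * 703 / 1000 =
// 546 + 454 = 1_000 sat, which the 1_000 sat received HTLC just meets. One more sat/kW (the 648
// vector below) pushes the threshold to 1_001 sat and trims it.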
9136 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9137 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9138 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9141 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9142 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9143 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9146 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9147 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9148 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9151 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9152 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9153 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9156 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9157 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9158 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9161 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9162 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9163 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9166 // commitment tx with six outputs untrimmed (minimum feerate)
9167 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9168 chan.context.feerate_per_kw = 648;
9170 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9171 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9172 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9175 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9176 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9177 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9180 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9181 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9182 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9185 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9186 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9187 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9190 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9191 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9192 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9195 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9196 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9197 chan.context.feerate_per_kw = 645;
9198 chan.context.holder_dust_limit_satoshis = 1001;
9200 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9201 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9202 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9205 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9206 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9207 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9210 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9211 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9212 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9215 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9216 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9217 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9220 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9221 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9222 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9225 // commitment tx with six outputs untrimmed (maximum feerate)
9226 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9227 chan.context.feerate_per_kw = 2069;
9228 chan.context.holder_dust_limit_satoshis = 546;
9230 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9231 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9232 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9235 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9236 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9237 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9240 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9241 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9242 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9245 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9246 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9247 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9250 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9251 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9252 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9255 // commitment tx with five outputs untrimmed (minimum feerate)
9256 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9257 chan.context.feerate_per_kw = 2070;
9259 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9260 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9261 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9264 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9265 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9266 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9269 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9270 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9271 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9274 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9275 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9276 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9279 // commitment tx with five outputs untrimmed (maximum feerate)
9280 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9281 chan.context.feerate_per_kw = 2194;
9283 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9284 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9285 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9288 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9289 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9290 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9293 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9294 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9295 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9298 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9299 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9300 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9303 // commitment tx with four outputs untrimmed (minimum feerate)
9304 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9305 chan.context.feerate_per_kw = 2195;
9307 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9308 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9309 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9312 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9313 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9314 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9317 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9318 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9319 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9322 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9323 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9324 chan.context.feerate_per_kw = 2185;
9325 chan.context.holder_dust_limit_satoshis = 2001;
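// The anchors cases temporarily switch the channel type to
// `anchors_zero_htlc_fee_and_dependencies`; the original (non-anchors) type is kept
// in `cached_channel_type` and restored before each subsequent non-anchors case.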
9326 let cached_channel_type = chan.context.channel_type;
9327 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9329 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9330 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9331 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9334 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9335 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9336 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9339 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9340 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9341 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9344 // commitment tx with four outputs untrimmed (maximum feerate)
9345 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9346 chan.context.feerate_per_kw = 3702;
9347 chan.context.holder_dust_limit_satoshis = 546;
9348 chan.context.channel_type = cached_channel_type.clone();
9350 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9351 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9352 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9355 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9356 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9357 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9360 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9361 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9362 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9365 // commitment tx with three outputs untrimmed (minimum feerate)
9366 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9367 chan.context.feerate_per_kw = 3703;
9369 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9370 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9371 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9374 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9375 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9376 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9379 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9380 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9381 chan.context.feerate_per_kw = 3687;
9382 chan.context.holder_dust_limit_satoshis = 3001;
9383 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9385 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9386 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9387 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9390 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9391 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9392 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9395 // commitment tx with three outputs untrimmed (maximum feerate)
9396 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9397 chan.context.feerate_per_kw = 4914;
9398 chan.context.holder_dust_limit_satoshis = 546;
9399 chan.context.channel_type = cached_channel_type.clone();
9401 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9402 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9403 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9406 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9407 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9408 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9411 // commitment tx with two outputs untrimmed (minimum feerate)
9412 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9413 chan.context.feerate_per_kw = 4915;
9414 chan.context.holder_dust_limit_satoshis = 546;
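// At this feerate every HTLC is trimmed from the commitment transaction, so the
// trailing HTLC list is empty and only the commitment transaction itself is checked.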
9416 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9417 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9418 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9420 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9421 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9422 chan.context.feerate_per_kw = 4894;
9423 chan.context.holder_dust_limit_satoshis = 4001;
9424 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9426 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9427 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9428 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9430 // commitment tx with two outputs untrimmed (maximum feerate)
9431 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9432 chan.context.feerate_per_kw = 9651180;
9433 chan.context.holder_dust_limit_satoshis = 546;
9434 chan.context.channel_type = cached_channel_type.clone();
9436 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9437 "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9438 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9440 // commitment tx with one output untrimmed (minimum feerate)
9441 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9442 chan.context.feerate_per_kw = 9651181;
9444 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9445 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9446 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9448 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9449 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9450 chan.context.feerate_per_kw = 6216010;
9451 chan.context.holder_dust_limit_satoshis = 4001;
9452 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9454 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9455 "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9456 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9458 // commitment tx with fee greater than funder amount
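// The funder cannot pay more than its remaining balance in fees, so the expected
// transaction and signatures are identical to the one-output minimum-feerate case above.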
9459 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9460 chan.context.feerate_per_kw = 9651936;
9461 chan.context.holder_dust_limit_satoshis = 546;
9462 chan.context.channel_type = cached_channel_type;
9464 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9465 "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9466 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9468 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
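// The two offered HTLCs produce identical outputs (same satoshi amount and script),
// which exercises BOLT 3's output ordering rule of breaking such ties by increasing
// cltv_expiry.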
9469 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9470 chan.context.feerate_per_kw = 253;
9471 chan.context.pending_inbound_htlcs.clear();
9472 chan.context.pending_inbound_htlcs.push({
9473 let mut out = InboundHTLCOutput{
9475 amount_msat: 2000000,
9477 payment_hash: PaymentHash([0; 32]),
9478 state: InboundHTLCState::Committed,
9480 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9483 chan.context.pending_outbound_htlcs.clear();
9484 chan.context.pending_outbound_htlcs.push({
9485 let mut out = OutboundHTLCOutput{
9487 amount_msat: 5000001,
9489 payment_hash: PaymentHash([0; 32]),
9490 state: OutboundHTLCState::Committed,
9491 source: HTLCSource::dummy(),
9492 skimmed_fee_msat: None,
9493 blinding_point: None,
9495 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9498 chan.context.pending_outbound_htlcs.push({
9499 let mut out = OutboundHTLCOutput{
9501 amount_msat: 5000000,
9503 payment_hash: PaymentHash([0; 32]),
9504 state: OutboundHTLCState::Committed,
9505 source: HTLCSource::dummy(),
9506 skimmed_fee_msat: None,
9507 blinding_point: None,
9509 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9513 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9514 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9515 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9518 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9519 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9520 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9522 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9523 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9524 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9526 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9527 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9528 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9531 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
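// Re-sign the same channel state as an anchors channel. Per BOLT 3, with
// `option_anchors_zero_fee_htlc_tx` the commitment transaction gains 330-sat anchor outputs,
// the counterparty's HTLC signatures use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY (the trailing
// 0x83 sighash byte visible in the HTLC witnesses below), and the HTLC transactions
// themselves are built with zero fee.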
9532 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9533 "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9534 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9537 "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9538 "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9539 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9541 "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9542 "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9543 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9545 "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9546 "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9547 "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9552 fn test_per_commitment_secret_gen() {
9553 // Test vectors from BOLT 3 Appendix D:
9555 let mut seed = [0; 32];
9556 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9557 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9558 <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9560 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9561 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9562 <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9564 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9565 <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9567 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9568 <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9570 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9571 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9572 <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
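// For reference, the derivation these vectors pin down (BOLT 3): starting from the seed, for
// each set bit of the 48-bit index, from bit 47 down to bit 0, flip that bit in the running
// value and SHA256 the result. A minimal stand-alone sketch of what
// `chan_utils::build_commitment_secret` is expected to compute (the helper name here is
// illustrative only):
fn build_commitment_secret_sketch(seed: &[u8; 32], idx: u64) -> [u8; 32] {
	let mut res = *seed;
	for i in 0..48 {
		let bitpos = 47 - i;
		if idx & (1u64 << bitpos) != 0 {
			// Flip bit `bitpos` of the running value, then hash it.
			res[bitpos / 8] ^= 1 << (bitpos & 7);
			res = Sha256::hash(&res).to_byte_array();
		}
	}
	res
}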
9576 fn test_key_derivation() {
9577 // Test vectors from BOLT 3 Appendix E:
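// Per BOLT 3, the keys checked below are derived as:
//   pubkey            = basepoint + SHA256(per_commitment_point || basepoint) * G
//   privkey           = basepoint_secret + SHA256(per_commitment_point || basepoint)
//   revocationpubkey  = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
//                         + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
//   revocationprivkey = revocation_basepoint_secret * SHA256(revocation_basepoint || per_commitment_point)
//                         + per_commitment_secret * SHA256(per_commitment_point || revocation_basepoint)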
9578 let secp_ctx = Secp256k1::new();
9580 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9581 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9583 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9584 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9586 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9587 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
9589 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9590 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
9592 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9593 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9595 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9596 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9600 fn test_zero_conf_channel_type_support() {
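// Tests that an inbound channel can be created when the peer's `open_channel` requests a
// `channel_type` of `static_remote_key` plus the zero-conf feature.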
9601 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9602 let secp_ctx = Secp256k1::new();
9603 let seed = [42; 32];
9604 let network = Network::Testnet;
9605 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9606 let logger = test_utils::TestLogger::new();
9608 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9609 let config = UserConfig::default();
9610 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9611 node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9613 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9614 channel_type_features.set_zero_conf_required();
9616 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9617 open_channel_msg.channel_type = Some(channel_type_features);
9618 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9619 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9620 node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9621 &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9622 assert!(res.is_ok());
9626 fn test_supports_anchors_zero_htlc_tx_fee() {
9627 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9628 // resulting `channel_type`.
9629 let secp_ctx = Secp256k1::new();
9630 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9631 let network = Network::Testnet;
9632 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9633 let logger = test_utils::TestLogger::new();
9635 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9636 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9638 let mut config = UserConfig::default();
9639 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9641 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9642 // need to signal it.
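// With only one side opting in, anchors is left out of the negotiated type (checked just
// below) and we fall back to the best channel type both peers support.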
9643 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9644 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9645 &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9646 &config, 0, 42, None
9648 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9650 let mut expected_channel_type = ChannelTypeFeatures::empty();
9651 expected_channel_type.set_static_remote_key_required();
9652 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9654 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9655 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9656 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9660 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9661 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9662 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9663 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9664 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9667 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9668 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9672 fn test_rejects_implicit_simple_anchors() {
9673 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9674 // each side's `InitFeatures`, it is rejected.
9675 let secp_ctx = Secp256k1::new();
9676 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9677 let network = Network::Testnet;
9678 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9679 let logger = test_utils::TestLogger::new();
9681 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9682 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9684 let config = UserConfig::default();
9686 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9687 let static_remote_key_required: u64 = 1 << 12;
9688 let simple_anchors_required: u64 = 1 << 20;
9689 let raw_init_features = static_remote_key_required | simple_anchors_required;
9690 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
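// For illustration: (1 << 12) | (1 << 20) == 0x101000, whose little-endian encoding begins
// [0x00, 0x10, 0x10]. Even-numbered bits mean "required" under BOLT 9, so these raw features
// demand `static_remote_key` and the legacy `option_anchors` (anchor outputs) feature.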
9692 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9693 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9694 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9698 // Set `channel_type` to `None` to force the implicit feature negotiation.
9699 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9700 open_channel_msg.channel_type = None;
9702 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9703 // `static_remote_key`, it will fail the channel.
9704 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9705 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9706 &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9707 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9709 assert!(channel_b.is_err());
9713 fn test_rejects_simple_anchors_channel_type() {
9714 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature, it is rejected.
9716 let secp_ctx = Secp256k1::new();
9717 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9718 let network = Network::Testnet;
9719 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9720 let logger = test_utils::TestLogger::new();
9722 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9723 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9725 let config = UserConfig::default();
9727 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9728 let static_remote_key_required: u64 = 1 << 12;
9729 let simple_anchors_required: u64 = 1 << 20;
9730 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9731 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9732 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9733 assert!(!simple_anchors_init.requires_unknown_bits());
9734 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9736 // First, we'll try to open a channel between A and B where A requests a channel type for
9737 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9738 // B as it's not supported by LDK.
9739 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9740 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9741 &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9745 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9746 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9748 let res = InboundV1Channel::<&TestKeysInterface>::new(
9749 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9750 &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9751 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9753 assert!(res.is_err());
9755 // Then, we'll try to open another channel where A requests a channel type for
9756 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9757 original `option_anchors` feature, which should be rejected by A as it's not supported by LDK.
9759 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9760 &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9761 10000000, 100000, 42, &config, 0, 42, None
9764 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9766 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9767 &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9768 &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9769 &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9772 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9773 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9775 let res = channel_a.accept_channel(
9776 &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9778 assert!(res.is_err());
9782 fn test_waiting_for_batch() {
9783 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9784 let logger = test_utils::TestLogger::new();
9785 let secp_ctx = Secp256k1::new();
9786 let seed = [42; 32];
9787 let network = Network::Testnet;
9788 let best_block = BestBlock::from_network(network);
9789 let chain_hash = ChainHash::using_genesis_block(network);
9790 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9792 let mut config = UserConfig::default();
9793 // Enable trust_own_funding_0conf, which on its own would let us send channel_ready right away,
9794 // and check that we still hold channel_ready for a channel in a batch until all channels in the batch are ready.
9795 config.channel_handshake_limits.trust_own_funding_0conf = true;
9797 // Create a channel from node a to node b that will be part of batch funding.
9798 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9799 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9804 &channelmanager::provided_init_features(&config),
9814 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9815 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9816 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9821 &channelmanager::provided_channel_type_features(&config),
9822 &channelmanager::provided_init_features(&config),
9828 true, // Allow node b to send a 0conf channel_ready.
9831 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9832 node_a_chan.accept_channel(
9833 &accept_channel_msg,
9834 &config.channel_handshake_limits,
9835 &channelmanager::provided_init_features(&config),
9838 // Fund the channel with a batch funding transaction.
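// The transaction below carries two equal-value outputs: index 0 pays this channel's funding
// redeemscript, while the second output stands in for another channel sharing the same batch
// funding transaction.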
9839 let output_script = node_a_chan.context.get_funding_redeemscript();
9840 let tx = Transaction {
9842 lock_time: LockTime::ZERO,
9846 value: 10000000, script_pubkey: output_script.clone(),
9849 value: 10000000, script_pubkey: Builder::new().into_script(),
9852 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9853 let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(
9858 ).map_err(|_| ()).unwrap();
9859 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9860 &funding_created_msg.unwrap(),
9864 ).map_err(|_| ()).unwrap();
9865 let node_b_updates = node_b_chan.monitor_updating_restored(
9873 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9874 // broadcasting the funding transaction until the batch is ready.
9875 let _ = node_a_chan.funding_signed(
9876 &funding_signed_msg.unwrap(),
9881 let node_a_updates = node_a_chan.monitor_updating_restored(
9888 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9889 // as the funding transaction depends on all channels in the batch becoming ready.
9890 assert!(node_a_updates.channel_ready.is_none());
9891 assert!(node_a_updates.funding_broadcastable.is_none());
9892 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
9894 // It is possible to receive a 0conf channel_ready from the remote node.
9895 node_a_chan.channel_ready(
9896 &node_b_updates.channel_ready.unwrap(),
9904 node_a_chan.context.channel_state,
9905 ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
9908 // The WAITING_FOR_BATCH flag is only cleared when the ChannelManager calls set_batch_ready.
9909 node_a_chan.set_batch_ready();
9910 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
9911 assert!(node_a_chan.check_get_channel_ready(0).is_some());
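// With the batch flag cleared and the peer's channel_ready already received, the channel is
// finally able to produce its own channel_ready, hence `check_get_channel_ready` returns
// `Some`.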