// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
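
// A minimal illustrative sketch, not part of the original source: the fields above are
// related in that the next HTLC we send can never exceed our total outbound capacity, and
// its minimum can never exceed its limit. The values below are hypothetical.
#[cfg(test)]
fn _available_balances_field_relationship_sketch() {
	let balances = AvailableBalances {
		balance_msat: 1_000_000,
		inbound_capacity_msat: 500_000,
		outbound_capacity_msat: 800_000,
		next_outbound_htlc_limit_msat: 600_000,
		next_outbound_htlc_minimum_msat: 1_000,
	};
	// The per-HTLC limit is bounded by the overall outbound capacity...
	assert!(balances.next_outbound_htlc_limit_msat <= balances.outbound_capacity_msat);
	// ...and the minimum sendable value is bounded by that limit.
	assert!(balances.next_outbound_htlc_minimum_msat <= balances.next_outbound_htlc_limit_msat);
}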

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an `AwaitingAnnouncedRemoteRevoke` variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to `AwaitingAnnouncedRemoteRevoke`.

	// Outbound state can only be `LocalAnnounced` or `Committed`.
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which are provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK versions 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}
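
// Illustrative sketch, not in the original file: how the conversions above are meant to be
// used when mapping an optional failure reason into an HTLC outcome and back.
#[cfg(test)]
fn _outbound_htlc_outcome_conversion_sketch() {
	// `None` means the HTLC succeeded; LDK 0.0.105+ fills the preimage in directly elsewhere.
	let outcome: OutboundHTLCOutcome = None::<HTLCFailReason>.into();
	assert!(matches!(outcome, OutboundHTLCOutcome::Success(None)));
	// Borrowing the outcome yields the failure reason only for failed HTLCs.
	let reason: Option<&HTLCFailReason> = (&outcome).into();
	assert!(reason.is_none());
}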

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}

macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
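
// A quick sketch, not in the original source, of how the flag types generated by the macro
// above behave: they are thin `u32` newtypes supporting the usual bit operations. It uses
// the `AwaitingChannelReadyFlags` and `FundedStateFlags` types defined just below.
#[cfg(test)]
fn _state_flags_usage_sketch() {
	let mut flags = AwaitingChannelReadyFlags::new();
	flags |= AwaitingChannelReadyFlags::OUR_CHANNEL_READY;
	assert!(flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY));
	assert!(!flags.is_set(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	// Funded-state flags (e.g. PEER_DISCONNECTED) can be combined with any FUNDED_STATE type.
	flags |= FundedStateFlags::PEER_DISCONNECTED;
	// Masking with `FundedStateFlags::ALL` keeps only the bits shared across funded states.
	assert_eq!(flags & FundedStateFlags::ALL, FundedStateFlags::PEER_DISCONNECTED);
}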

/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}

define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
	]
);

// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}

macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(ChannelState::$state(flags) => flags.is_set($state_flag.into()),)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(ChannelState::$state(flags) => *flags |= $state_flag,)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(ChannelState::$state(flags) => *flags &= !($state_flag),)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}

impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}
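
	// Illustrative round-trip sketch, not in the original source: `to_u32` embeds the
	// variant discriminant bit alongside the variant's flags, and `from_u32` recovers both.
	#[cfg(test)]
	fn _serialization_round_trip_sketch() {
		let state = ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE);
		let encoded = state.to_u32();
		assert_eq!(encoded, state_flags::CHANNEL_READY | state_flags::AWAITING_REMOTE_REVOKE);
		assert_eq!(ChannelState::from_u32(encoded), Ok(state));
	}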

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
				!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
				!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
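
// Sketch, not in the original source, of the helpers generated above: each
// `impl_state_flag!` line expands to a getter/setter/clearer trio on `ChannelState`.
#[cfg(test)]
fn _generated_state_flag_helpers_sketch() {
	let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
	assert!(!state.is_peer_disconnected());
	// Setting a flag only mutates the variants named in the macro invocation.
	state.set_peer_disconnected();
	assert!(state.is_peer_disconnected());
	state.clear_peer_disconnected();
	assert!(!state.is_peer_disconnected());
}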

pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
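
// Hedged example, not in the original source: the expected weight of a commitment
// transaction is the base weight above plus a fixed per-HTLC weight for each non-dust HTLC.
#[cfg(test)]
fn _commitment_tx_weight_sketch() {
	// A non-anchor channel type, so the 724-weight base applies.
	let features = ChannelTypeFeatures::only_static_remote_key();
	let num_nondust_htlcs = 3u64;
	let weight = commitment_tx_base_weight(&features)
		+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
	assert_eq!(weight, 724 + 3 * 172);
}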

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
677 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
678 /// our counterparty or not. However, we don't want to announce updates right away to avoid
679 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
680 /// our channel_update message and track the current state here.
681 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
682 #[derive(Clone, Copy, PartialEq)]
683 pub(super) enum ChannelUpdateStatus {
684 /// We've announced the channel as enabled and are connected to our peer.
686 /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
688 /// Our channel is live again, but we haven't announced the channel as enabled yet.
690 /// We've announced the channel as disabled.
694 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
696 pub enum AnnouncementSigsState {
697 /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
698 /// we sent the last `AnnouncementSignatures`.
700 /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
701 /// This state never appears on disk - instead we write `NotSent`.
703 /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
704 /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
705 /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
706 /// they send back a `RevokeAndACK`.
707 /// This state never appears on disk - instead we write `NotSent`.
709 /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
710 /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
714 /// An enum indicating whether the local or remote side offered a given HTLC.

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}

/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}

/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}

/// The return value of `monitor_updating_restored`
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`
#[allow(unused)]
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}

/// The result of a shutdown that should be handled.
#[must_use]
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
}

/// If the majority of the channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
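
// Illustrative sketch, not in the original source: the affordability checks elsewhere in this
// file budget for the feerate rising by the multiple above, which is equivalent to pricing the
// commitment-fee check at a scaled-up feerate. The numbers here are hypothetical.
#[cfg(test)]
fn _fee_spike_buffer_sketch() {
	let feerate_per_kw: u64 = 1000;
	// Budget as if the feerate had already doubled, so a reasonable fee increase can't
	// immediately make our commitment transaction unaffordable.
	let buffered_feerate = feerate_per_kw * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
	assert_eq!(buffered_feerate, 2000);
}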

/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;

/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
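
// Worked arithmetic, not in the original source, for the constant above: with one-minute
// ticks and ~300 seconds of expected gossip convergence, 300 / 60 = 5 ticks.
#[cfg(test)]
fn _expire_prev_config_ticks_derivation_sketch() {
	let convergence_delay_secs = 300usize;
	let tick_interval_secs = 60usize;
	assert_eq!(EXPIRE_PREV_CONFIG_TICKS, convergence_delay_secs / tick_interval_secs);
}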

/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});

/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}

/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to fund the channel after reaching
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
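
// Sketch, not in the original source, of the intended call pattern: the timer calls this
// once per tick and force-closes the channel once it returns true.
#[cfg(test)]
fn _unfunded_channel_expiry_sketch() {
	let mut ctx = UnfundedChannelContext { unfunded_channel_age_ticks: 0 };
	// For the first UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 ticks the channel is kept alive...
	for _ in 0..UNFUNDED_CHANNEL_AGE_LIMIT_TICKS - 1 {
		assert!(!ctx.should_expire_unfunded_channel());
	}
	// ...and on the tick that reaches the limit it should be expired.
	assert!(ctx.should_expire_unfunded_channel());
}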

/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
1244 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1245 /// Allowed in any state (including after shutdown)
1246 pub fn get_update_time_counter(&self) -> u32 {
1247 self.update_time_counter
1250 pub fn get_latest_monitor_update_id(&self) -> u64 {
1251 self.latest_monitor_update_id
1254 pub fn should_announce(&self) -> bool {
1255 self.config.announced_channel
1258 pub fn is_outbound(&self) -> bool {
1259 self.channel_transaction_parameters.is_outbound_from_holder
1262 /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1263 /// Allowed in any state (including after shutdown)
1264 pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1265 self.config.options.forwarding_fee_base_msat
1268 /// Returns true if we've ever received a message from the remote end for this Channel
1269 pub fn have_received_message(&self) -> bool {
1270 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1273 /// Returns true if this channel is fully established and not known to be closing.
1274 /// Allowed in any state (including after shutdown)
1275 pub fn is_usable(&self) -> bool {
1276 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1277 !self.channel_state.is_local_shutdown_sent() &&
1278 !self.channel_state.is_remote_shutdown_sent() &&
1279 !self.monitor_pending_channel_ready
1282 /// Returns the state of the channel as it progresses through the various stages of shutdown.
1283 pub fn shutdown_state(&self) -> ChannelShutdownState {
1284 match self.channel_state {
1285 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1286 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1287 ChannelShutdownState::ShutdownInitiated
1288 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1289 ChannelShutdownState::ResolvingHTLCs
1290 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1291 ChannelShutdownState::NegotiatingClosingFee
1292 } else {
1293 ChannelShutdownState::NotShuttingDown
1294 },
1295 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1296 _ => ChannelShutdownState::NotShuttingDown,
1300 fn closing_negotiation_ready(&self) -> bool {
1301 let is_ready_to_close = match self.channel_state {
1302 ChannelState::AwaitingChannelReady(flags) =>
1303 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1304 ChannelState::ChannelReady(flags) =>
1305 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1306 _ => false,
1307 };
1308 self.pending_inbound_htlcs.is_empty() &&
1309 self.pending_outbound_htlcs.is_empty() &&
1310 self.pending_update_fee.is_none() &&
1311 is_ready_to_close
1314 /// Returns true if this channel is currently available for use. This performs a superset of
1315 /// the checks in is_usable() and also considers things like the channel being temporarily disabled.
1316 /// Allowed in any state (including after shutdown)
1317 pub fn is_live(&self) -> bool {
1318 self.is_usable() && !self.channel_state.is_peer_disconnected()
1321 // Public utilities:
1323 pub fn channel_id(&self) -> ChannelId {
1324 self.channel_id
1327 // Returns the `temporary_channel_id` used during channel establishment.
1329 // Will return `None` for channels created prior to LDK version 0.0.115.
1330 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1331 self.temporary_channel_id
1334 pub fn minimum_depth(&self) -> Option<u32> {
1335 self.minimum_depth
1338 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1339 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1340 pub fn get_user_id(&self) -> u128 {
1341 self.user_id
1344 /// Gets the channel's type
1345 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1346 &self.channel_type
1349 /// Gets the channel's `short_channel_id`.
1351 /// Will return `None` if the channel hasn't been confirmed yet.
1352 pub fn get_short_channel_id(&self) -> Option<u64> {
1353 self.short_channel_id
1356 /// Allowed in any state (including after shutdown)
1357 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1358 self.latest_inbound_scid_alias
1361 /// Allowed in any state (including after shutdown)
1362 pub fn outbound_scid_alias(&self) -> u64 {
1363 self.outbound_scid_alias
1366 /// Returns the holder signer for this channel.
1368 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1369 return &self.holder_signer
1372 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1373 /// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID aliases),
1374 /// or prior to any channel actions during `Channel` initialization.
1375 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1376 debug_assert_eq!(self.outbound_scid_alias, 0);
1377 self.outbound_scid_alias = outbound_scid_alias;
1380 /// Returns the funding_txo we either got from our peer, or were given by
1381 /// get_funding_created.
1382 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1383 self.channel_transaction_parameters.funding_outpoint
1386 /// Returns the height at which our funding transaction was confirmed.
1387 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1388 let conf_height = self.funding_tx_confirmation_height;
1389 if conf_height > 0 {
1390 Some(conf_height)
1391 } else {
1392 None
1396 /// Returns the hash of the block in which our funding transaction was confirmed.
1397 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1398 self.funding_tx_confirmed_in
1401 /// Returns the current number of confirmations on the funding transaction.
1402 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1403 if self.funding_tx_confirmation_height == 0 {
1404 // We either haven't seen any confirmation yet, or observed a reorg.
1405 return 0;
1408 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1411 fn get_holder_selected_contest_delay(&self) -> u16 {
1412 self.channel_transaction_parameters.holder_selected_contest_delay
1415 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1416 &self.channel_transaction_parameters.holder_pubkeys
1419 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1420 self.channel_transaction_parameters.counterparty_parameters
1421 .as_ref().map(|params| params.selected_contest_delay)
1424 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1425 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1428 /// Allowed in any state (including after shutdown)
1429 pub fn get_counterparty_node_id(&self) -> PublicKey {
1430 self.counterparty_node_id
1433 /// Allowed in any state (including after shutdown)
1434 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1435 self.holder_htlc_minimum_msat
1438 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1439 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1440 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1443 /// Allowed in any state (including after shutdown)
1444 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1445 return cmp::min(
1446 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1447 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1448 // the channel might have been used to route very small values (either by honest users or as DoS).
1449 self.channel_value_satoshis * 1000 * 9 / 10,
1451 self.counterparty_max_htlc_value_in_flight_msat
1452 );
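// Worked example (hypothetical values, for illustration only): for a 1_000_000 sat channel
// whose counterparty allows 950_000_000 msat in flight, we announce
// min(1_000_000 * 1000 * 9 / 10, 950_000_000) = 900_000_000 msat, i.e. 90% of capacity.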
1455 /// Allowed in any state (including after shutdown)
1456 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1457 self.counterparty_htlc_minimum_msat
1460 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1461 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1462 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1465 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1466 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1467 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1468 cmp::min(
1469 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1470 party_max_htlc_value_in_flight_msat
1471 )
1472 })
1475 pub fn get_value_satoshis(&self) -> u64 {
1476 self.channel_value_satoshis
1479 pub fn get_fee_proportional_millionths(&self) -> u32 {
1480 self.config.options.forwarding_fee_proportional_millionths
1483 pub fn get_cltv_expiry_delta(&self) -> u16 {
1484 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1487 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1488 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1489 where F::Target: FeeEstimator
1491 match self.config.options.max_dust_htlc_exposure {
1492 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1493 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1494 ConfirmationTarget::OnChainSweep) as u64;
1495 feerate_per_kw.saturating_mul(multiplier)
1497 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
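// Worked example (hypothetical values, for illustration only): with the fee estimator
// returning 2_500 sat/kWU for OnChainSweep and a FeeRateMultiplier of 5_000, the allowed
// dust exposure is 2_500 * 5_000 = 12_500_000 msat.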
1501 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1502 pub fn prev_config(&self) -> Option<ChannelConfig> {
1503 self.prev_config.map(|prev_config| prev_config.0)
1506 // Checks whether we should emit a `ChannelPending` event.
1507 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1508 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1511 // Returns whether we already emitted a `ChannelPending` event.
1512 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1513 self.channel_pending_event_emitted
1516 // Remembers that we already emitted a `ChannelPending` event.
1517 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1518 self.channel_pending_event_emitted = true;
1521 // Checks whether we should emit a `ChannelReady` event.
1522 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1523 self.is_usable() && !self.channel_ready_event_emitted
1526 // Remembers that we already emitted a `ChannelReady` event.
1527 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1528 self.channel_ready_event_emitted = true;
1531 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1532 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1533 /// no longer be considered when forwarding HTLCs.
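///
/// Illustrative timeline (a sketch in terms of the constant above): a config replaced at
/// tick `T` keeps being honored for forwards until tick `T + EXPIRE_PREV_CONFIG_TICKS`,
/// at which point `prev_config` is dropped.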
1534 pub fn maybe_expire_prev_config(&mut self) {
1535 if self.prev_config.is_none() {
1536 return;
1537 }
1538 let prev_config = self.prev_config.as_mut().unwrap();
1539 prev_config.1 += 1;
1540 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1541 self.prev_config = None;
1545 /// Returns the current [`ChannelConfig`] applied to the channel.
1546 pub fn config(&self) -> ChannelConfig {
1547 self.config.options
1550 /// Updates the channel's config. Returns a bool indicating whether the applied config
1551 /// update resulted in a new ChannelUpdate message.
1552 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1553 let did_channel_update =
1554 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1555 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1556 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1557 if did_channel_update {
1558 self.prev_config = Some((self.config.options, 0));
1559 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1560 // policy change to propagate throughout the network.
1561 self.update_time_counter += 1;
1563 self.config.options = *config;
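// Illustrative caller behavior (a sketch, not prescribed by this method): when this returns
// true the caller broadcasts a fresh channel_update carrying the new forwarding parameters,
// while `prev_config` keeps the old options honored for in-flight HTLCs until
// `maybe_expire_prev_config` expires them.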
1567 /// Returns true if funding_signed was sent/received and the
1568 /// funding transaction has been broadcast if necessary.
1569 pub fn is_funding_broadcast(&self) -> bool {
1570 !self.channel_state.is_pre_funded_state() &&
1571 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1574 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1575 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1576 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1577 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1578 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding an HTLC).
1580 /// @local is used only to convert relevant internal structures which refer to remote vs local
1581 /// to decide the value of outputs and the direction of HTLCs.
1582 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1583 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1584 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1585 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1586 /// which peer generated this transaction and "to whom" this transaction flows.
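///
/// Illustrative combinations (a sketch, not an exhaustive list): `local == true,
/// generated_by_local == false` builds our own commitment transaction including updates the
/// counterparty has proposed (e.g. when validating their commitment_signed), while
/// `local == false, generated_by_local == true` builds the counterparty's transaction
/// containing only the updates we ourselves have proposed.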
1588 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1589 where L::Target: Logger
1591 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1592 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1593 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1595 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1596 let mut remote_htlc_total_msat = 0;
1597 let mut local_htlc_total_msat = 0;
1598 let mut value_to_self_msat_offset = 0;
1600 let mut feerate_per_kw = self.feerate_per_kw;
1601 if let Some((feerate, update_state)) = self.pending_update_fee {
1602 if match update_state {
1603 // Note that these match the inclusion criteria when scanning
1604 // pending_inbound_htlcs below.
1605 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1606 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1607 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1608 } {
1609 feerate_per_kw = feerate;
1613 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1614 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1615 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1616 &self.channel_id(),
1617 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1619 macro_rules! get_htlc_in_commitment {
1620 ($htlc: expr, $offered: expr) => {
1621 HTLCOutputInCommitment {
1622 offered: $offered,
1623 amount_msat: $htlc.amount_msat,
1624 cltv_expiry: $htlc.cltv_expiry,
1625 payment_hash: $htlc.payment_hash,
1626 transaction_output_index: None
1631 macro_rules! add_htlc_output {
1632 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1633 if $outbound == local { // "offered HTLC output"
1634 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1635 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1636 0
1637 } else {
1638 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1639 };
1640 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1641 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1642 included_non_dust_htlcs.push((htlc_in_tx, $source));
1644 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1645 included_dust_htlcs.push((htlc_in_tx, $source));
1648 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1649 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1650 0
1651 } else {
1652 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1653 };
1654 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1655 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1656 included_non_dust_htlcs.push((htlc_in_tx, $source));
1658 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1659 included_dust_htlcs.push((htlc_in_tx, $source));
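// Worked dust example (hypothetical non-anchor numbers, for illustration only): at
// feerate_per_kw = 2_500 and an HTLC-timeout transaction of 663 WU, htlc_tx_fee
// = 2_500 * 663 / 1000 = 1_657 sat; with a 546 sat broadcaster dust limit, an offered HTLC
// below 546 + 1_657 = 2_203 sat is counted as dust and receives no output on the
// commitment transaction.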
1665 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1667 for ref htlc in self.pending_inbound_htlcs.iter() {
1668 let (include, state_name) = match htlc.state {
1669 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1670 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1671 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1672 InboundHTLCState::Committed => (true, "Committed"),
1673 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1677 add_htlc_output!(htlc, false, None, state_name);
1678 remote_htlc_total_msat += htlc.amount_msat;
1680 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1682 &InboundHTLCState::LocalRemoved(ref reason) => {
1683 if generated_by_local {
1684 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1685 inbound_htlc_preimages.push(preimage);
1686 value_to_self_msat_offset += htlc.amount_msat as i64;
1696 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1698 for ref htlc in self.pending_outbound_htlcs.iter() {
1699 let (include, state_name) = match htlc.state {
1700 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1701 OutboundHTLCState::Committed => (true, "Committed"),
1702 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1703 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1704 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1707 let preimage_opt = match htlc.state {
1708 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1709 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1710 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1714 if let Some(preimage) = preimage_opt {
1715 outbound_htlc_preimages.push(preimage);
1719 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1720 local_htlc_total_msat += htlc.amount_msat;
1722 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1724 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1725 value_to_self_msat_offset -= htlc.amount_msat as i64;
1727 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1728 if !generated_by_local {
1729 value_to_self_msat_offset -= htlc.amount_msat as i64;
1737 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1738 assert!(value_to_self_msat >= 0);
1739 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1740 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1741 // "violate" their reserve value by couting those against it. Thus, we have to convert
1742 // everything to i64 before subtracting as otherwise we can overflow.
1743 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1744 assert!(value_to_remote_msat >= 0);
1746 #[cfg(debug_assertions)]
1748 // Make sure that the to_self/to_remote is always either past the appropriate
1749 // channel_reserve *or* it is making progress towards it.
1750 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1751 self.holder_max_commitment_tx_output.lock().unwrap()
1753 self.counterparty_max_commitment_tx_output.lock().unwrap()
1755 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1756 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1757 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1758 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1761 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1762 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1763 let (value_to_self, value_to_remote) = if self.is_outbound() {
1764 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1766 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1769 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1770 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1771 let (funding_pubkey_a, funding_pubkey_b) = if local {
1772 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1774 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1777 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1778 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1783 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1784 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1789 let num_nondust_htlcs = included_non_dust_htlcs.len();
1791 let channel_parameters =
1792 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1793 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1794 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1801 &mut included_non_dust_htlcs,
1804 let mut htlcs_included = included_non_dust_htlcs;
1805 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1806 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1807 htlcs_included.append(&mut included_dust_htlcs);
1815 local_balance_msat: value_to_self_msat as u64,
1816 remote_balance_msat: value_to_remote_msat as u64,
1817 inbound_htlc_preimages,
1818 outbound_htlc_preimages,
1823 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1824 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1825 /// our counterparty!)
1826 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1827 /// TODO Some magic rust shit to compile-time check this?
1828 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1829 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1830 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1831 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1832 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1834 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1838 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1839 /// will sign and send to our counterparty.
1840 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1841 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1842 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1843 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1844 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1846 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1849 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1850 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1851 /// Panics if called before accept_channel/InboundV1Channel::new
1852 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1853 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1856 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1857 &self.get_counterparty_pubkeys().funding_pubkey
1860 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1864 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1865 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1866 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1867 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1868 // more dust balance if the feerate increases when we have several HTLCs pending
1869 // which are near the dust limit.
1870 let mut feerate_per_kw = self.feerate_per_kw;
1871 // If there's a pending update fee, use it to ensure we aren't under-estimating
1872 // potential feerate updates coming soon.
1873 if let Some((feerate, _)) = self.pending_update_fee {
1874 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1876 if let Some(feerate) = outbound_feerate_update {
1877 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1879 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1880 cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
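// Worked example (hypothetical feerates): at feerate_per_kw = 1_000 this returns
// max(1_000 + 2_530, 1_000 * 1250 / 1000) = 3_530, while at 20_000 it returns
// max(22_530, 25_000) = 25_000, i.e. the 25% bump dominates at high feerates.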
1883 /// Get forwarding information for the counterparty.
1884 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1885 self.counterparty_forwarding_info.clone()
1888 /// Returns an HTLCStats about pending inbound HTLCs
1889 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1890 let context = &self;
1891 let mut stats = HTLCStats {
1892 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1893 pending_htlcs_value_msat: 0,
1894 on_counterparty_tx_dust_exposure_msat: 0,
1895 on_holder_tx_dust_exposure_msat: 0,
1896 holding_cell_msat: 0,
1897 on_holder_tx_holding_cell_htlcs_count: 0,
1900 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1901 (0, 0)
1902 } else {
1903 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1904 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1905 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1906 };
1907 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1908 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1909 for ref htlc in context.pending_inbound_htlcs.iter() {
1910 stats.pending_htlcs_value_msat += htlc.amount_msat;
1911 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1912 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1914 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1915 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1921 /// Returns an HTLCStats about pending outbound HTLCs, *including* pending adds in our holding cell.
1922 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1923 let context = &self;
1924 let mut stats = HTLCStats {
1925 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1926 pending_htlcs_value_msat: 0,
1927 on_counterparty_tx_dust_exposure_msat: 0,
1928 on_holder_tx_dust_exposure_msat: 0,
1929 holding_cell_msat: 0,
1930 on_holder_tx_holding_cell_htlcs_count: 0,
1933 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1934 (0, 0)
1935 } else {
1936 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1937 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1938 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1939 };
1940 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1941 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1942 for ref htlc in context.pending_outbound_htlcs.iter() {
1943 stats.pending_htlcs_value_msat += htlc.amount_msat;
1944 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1945 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1947 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1948 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1952 for update in context.holding_cell_htlc_updates.iter() {
1953 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1954 stats.pending_htlcs += 1;
1955 stats.pending_htlcs_value_msat += amount_msat;
1956 stats.holding_cell_msat += amount_msat;
1957 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1958 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1960 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1961 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1963 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1970 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1971 /// Doesn't bother handling the
1972 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1973 /// corner case properly.
1974 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1975 -> AvailableBalances
1976 where F::Target: FeeEstimator
1978 let context = &self;
1979 // Note that we have to handle overflow due to the above case.
1980 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1981 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1983 let mut balance_msat = context.value_to_self_msat;
1984 for ref htlc in context.pending_inbound_htlcs.iter() {
1985 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1986 balance_msat += htlc.amount_msat;
1989 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1991 let outbound_capacity_msat = context.value_to_self_msat
1992 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1993 .saturating_sub(
1994 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1996 let mut available_capacity_msat = outbound_capacity_msat;
1998 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1999 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2003 if context.is_outbound() {
2004 // We should mind the channel commit tx fee when computing how much of the available capacity
2005 // can be used for the next HTLC. Mirrors the logic in send_htlc.
2007 // The fee depends on whether the amount we will be sending is above dust or not,
2008 // and the answer will in turn change the amount itself, making it a circular
2009 // dependency.
2010 // This complicates the computation around dust-values, up to the one-htlc-value.
2011 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2012 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2013 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2016 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2017 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2018 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2019 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2020 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2021 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2022 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2025 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2026 // value ends up being below dust, we have this fee available again. In that case,
2027 // match the value to right-below-dust.
2028 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2029 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2030 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2031 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2032 debug_assert!(one_htlc_difference_msat != 0);
2033 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2034 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2035 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2037 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
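// Worked example (hypothetical msat values, for illustration only): with 10_000_000
// available, a dust-plus-fee timeout threshold of 5_000 sat and an above-dust fee
// reservation of 8_000_000, 10_000_000 - 8_000_000 = 2_000_000 < 5_000_000, so the
// one-HTLC fee difference is credited back and the limit is capped right below the dust
// threshold at 4_999_999 msat.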
2040 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2041 // sending a new HTLC won't reduce their balance below our reserve threshold.
2042 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2043 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2044 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2047 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2048 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2050 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2051 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2052 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2054 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2055 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2056 // we've selected for them, we can only send dust HTLCs.
2057 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2061 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2063 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2064 // between zero and the remaining dust exposure limit, OR above the dust limit.
2065 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2066 // send above the dust limit (as the router can always overpay to meet the dust limit).
2067 let mut remaining_msat_below_dust_exposure_limit = None;
2068 let mut dust_exposure_dust_limit_msat = 0;
2069 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2071 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2072 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2073 } else {
2074 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2075 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2076 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2077 };
2078 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2079 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2080 remaining_msat_below_dust_exposure_limit =
2081 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2082 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2085 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2086 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2087 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2088 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2089 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2090 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2093 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2094 if available_capacity_msat < dust_exposure_dust_limit_msat {
2095 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2096 } else {
2097 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2101 available_capacity_msat = cmp::min(available_capacity_msat,
2102 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2104 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2105 available_capacity_msat = 0;
2109 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2110 - context.value_to_self_msat as i64
2111 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2112 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2114 outbound_capacity_msat,
2115 next_outbound_htlc_limit_msat: available_capacity_msat,
2116 next_outbound_htlc_minimum_msat,
2121 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2122 let context = &self;
2123 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2126 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2127 /// number of pending HTLCs that are on track to be in our next commitment tx.
2129 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2130 /// `fee_spike_buffer_htlc` is `Some`.
2132 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2133 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2135 /// Dust HTLCs are excluded.
2136 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2137 let context = &self;
2138 assert!(context.is_outbound());
2140 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2141 (0, 0)
2142 } else {
2143 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2144 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2145 };
2146 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2147 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2149 let mut addl_htlcs = 0;
2150 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2151 match htlc.origin {
2152 HTLCInitiator::LocalOffered => {
2153 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2157 HTLCInitiator::RemoteOffered => {
2158 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2164 let mut included_htlcs = 0;
2165 for ref htlc in context.pending_inbound_htlcs.iter() {
2166 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2167 continue
2168 }
2169 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2170 // transaction including this HTLC if it times out before they RAA.
2171 included_htlcs += 1;
2174 for ref htlc in context.pending_outbound_htlcs.iter() {
2175 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2176 continue
2177 }
2178 match htlc.state {
2179 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2180 OutboundHTLCState::Committed => included_htlcs += 1,
2181 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2182 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2183 // transaction won't be generated until they send us their next RAA, which will mean
2184 // dropping any HTLCs in this state.
2189 for htlc in context.holding_cell_htlc_updates.iter() {
2190 match htlc {
2191 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2192 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2193 continue
2194 }
2195 included_htlcs += 1
2196 },
2197 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2198 // ack we're guaranteed to never include them in commitment txs anymore.
2202 let num_htlcs = included_htlcs + addl_htlcs;
2203 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2204 #[cfg(any(test, fuzzing))]
2205 {
2206 let mut fee = res;
2207 if fee_spike_buffer_htlc.is_some() {
2208 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2210 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2211 + context.holding_cell_htlc_updates.len();
2212 let commitment_tx_info = CommitmentTxInfoCached {
2214 total_pending_htlcs,
2215 next_holder_htlc_id: match htlc.origin {
2216 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2217 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2219 next_counterparty_htlc_id: match htlc.origin {
2220 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2221 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2223 feerate: context.feerate_per_kw,
2225 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2226 }
2227 res
2230 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2231 /// pending HTLCs that are on track to be in their next commitment tx
2233 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2234 /// `fee_spike_buffer_htlc` is `Some`.
2236 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2237 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2239 /// Dust HTLCs are excluded.
2240 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2241 let context = &self;
2242 assert!(!context.is_outbound());
2244 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2245 (0, 0)
2246 } else {
2247 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2248 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2249 };
2250 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2251 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2253 let mut addl_htlcs = 0;
2254 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2255 match htlc.origin {
2256 HTLCInitiator::LocalOffered => {
2257 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2261 HTLCInitiator::RemoteOffered => {
2262 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2268 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2269 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
2270 // committed outbound HTLCs, see below.
2271 let mut included_htlcs = 0;
2272 for ref htlc in context.pending_inbound_htlcs.iter() {
2273 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2274 continue
2275 }
2276 included_htlcs += 1;
2279 for ref htlc in context.pending_outbound_htlcs.iter() {
2280 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2281 continue
2282 }
2283 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2284 // i.e. if they've responded to us with an RAA after announcement.
2285 match htlc.state {
2286 OutboundHTLCState::Committed => included_htlcs += 1,
2287 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2288 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2293 let num_htlcs = included_htlcs + addl_htlcs;
2294 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2295 #[cfg(any(test, fuzzing))]
2296 {
2297 let mut fee = res;
2298 if fee_spike_buffer_htlc.is_some() {
2299 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2301 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2302 let commitment_tx_info = CommitmentTxInfoCached {
2304 total_pending_htlcs,
2305 next_holder_htlc_id: match htlc.origin {
2306 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2307 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2309 next_counterparty_htlc_id: match htlc.origin {
2310 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2311 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2313 feerate: context.feerate_per_kw,
2315 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2316 }
2317 res
2320 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2321 match self.channel_state {
2322 ChannelState::FundingNegotiated => f(),
2323 ChannelState::AwaitingChannelReady(flags) =>
2324 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2325 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) {
2326 f()
2327 } else {
2328 None
2329 },
2330 _ => None,
2335 /// Returns the transaction if there is a pending funding transaction that is yet to be broadcast.
2337 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2338 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2341 /// Returns the transaction ID if there is a pending funding transaction that is yet to be broadcast.
2343 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2344 self.if_unbroadcasted_funding(||
2345 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2349 /// Returns whether the channel is funded in a batch.
2350 pub fn is_batch_funding(&self) -> bool {
2351 self.is_batch_funding.is_some()
2354 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be broadcast.
2356 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2357 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2360 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2361 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2362 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2363 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2364 /// immediately (others we will have to allow to time out).
2365 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2366 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2367 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2368 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2369 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2370 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2372 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2373 // return them to fail the payment.
2374 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2375 let counterparty_node_id = self.get_counterparty_node_id();
2376 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2377 match htlc_update {
2378 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2379 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2384 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2385 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2386 // returning a channel monitor update here would imply a channel monitor update before
2387 // we even registered the channel monitor to begin with, which is invalid.
2388 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2389 // funding transaction, don't return a funding txo (which prevents providing the
2390 // monitor update to the user, even if we return one).
2391 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2392 if !self.channel_state.is_pre_funded_state() {
2393 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2394 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2395 update_id: self.latest_monitor_update_id,
2396 counterparty_node_id: Some(self.counterparty_node_id),
2397 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2401 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2402 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2404 self.channel_state = ChannelState::ShutdownComplete;
2405 self.update_time_counter += 1;
2406 ShutdownResult {
2407 closure_reason,
2408 monitor_update,
2409 dropped_outbound_htlcs,
2410 unbroadcasted_batch_funding_txid,
2411 channel_id: self.channel_id,
2412 user_channel_id: self.user_id,
2413 channel_capacity_satoshis: self.channel_value_satoshis,
2414 counterparty_node_id: self.counterparty_node_id,
2415 unbroadcasted_funding_tx,
2419 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2420 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2421 let counterparty_keys = self.build_remote_transaction_keys();
2422 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2424 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2425 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2426 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2427 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2429 match &self.holder_signer {
2430 // TODO (arik): move match into calling method for Taproot
2431 ChannelSignerType::Ecdsa(ecdsa) => {
2432 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2433 .map(|(signature, _)| msgs::FundingSigned {
2434 channel_id: self.channel_id(),
2435 signature,
2436 #[cfg(taproot)]
2437 partial_signature_with_nonce: None,
2438 }).ok();
2441 if funding_signed.is_none() {
2442 #[cfg(not(async_signing))] {
2443 panic!("Failed to get signature for funding_signed");
2445 #[cfg(async_signing)] {
2446 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2447 self.signer_pending_funding = true;
2449 } else if self.signer_pending_funding {
2450 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2451 self.signer_pending_funding = false;
2454 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2455 (counterparty_initial_commitment_tx, funding_signed)
2457 // TODO (taproot|arik)
2464 // Internal utility functions for channels
2466 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2467 /// `channel_value_satoshis` in msat, set through
2468 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2470 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2472 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2473 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2474 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2476 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2479 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2481 channel_value_satoshis * 10 * configured_percent
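// Note that `channel_value_satoshis * 10 * configured_percent` equals
// `channel_value_satoshis * 1000 * configured_percent / 100`, i.e. the configured
// percentage of the channel value expressed in msat. Worked example (hypothetical values):
// a 1_000_000 sat channel at 10% yields 1_000_000 * 10 * 10 = 100_000_000 msat.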
2484 /// Returns a minimum channel reserve value the remote needs to maintain,
2485 /// required by us according to the configured or default
2486 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2488 /// Guaranteed to return a value no larger than channel_value_satoshis
2490 /// This is used both for outbound and inbound channels and has lower bound
2491 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2492 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2493 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2494 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
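// Worked example (hypothetical values, assuming MIN_THEIR_CHAN_RESERVE_SATOSHIS is below
// the calculated reserve): a 1_000_000 sat channel with
// their_channel_reserve_proportional_millionths = 10_000 (i.e. 1%) yields
// min(1_000_000, max(1_000_000 * 10_000 / 1_000_000, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
// = 10_000 sat.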
2497 /// This is for legacy reasons, present for forward-compatibility.
2499 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
2500 /// from storage. Hence, we use this function to avoid persisting the default value of
2501 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2501 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2502 let (q, _) = channel_value_satoshis.overflowing_div(100);
2503 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2506 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2507 // Note that num_htlcs should not include dust HTLCs.
2509 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2510 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2513 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2514 // Note that num_htlcs should not include dust HTLCs.
2515 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2516 // Note that we need to divide before multiplying to round properly,
2517 // since the lowest denomination of bitcoin on-chain is the satoshi.
2518 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
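// Worked rounding example (hypothetical weight, for illustration only): at
// feerate_per_kw = 253 and a total weight of 724, 724 * 253 = 183_172, truncated by / 1000
// to 183 sat, then * 1000 = 183_000 msat, so the result is always a whole number of satoshis.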
2521 // Holder designates channel data owned by us, for the benefit of the user client.
2522 // Counterparty designates channel data owned by the other channel participant.
2523 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2524 pub context: ChannelContext<SP>,
2527 #[cfg(any(test, fuzzing))]
2528 struct CommitmentTxInfoCached {
2530 total_pending_htlcs: usize,
2531 next_holder_htlc_id: u64,
2532 next_counterparty_htlc_id: u64,
2536 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
2537 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
2538 trait FailHTLCContents {
2539 type Message: FailHTLCMessageName;
2540 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
2541 fn to_inbound_htlc_state(self) -> InboundHTLCState;
2542 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
2544 impl FailHTLCContents for msgs::OnionErrorPacket {
2545 type Message = msgs::UpdateFailHTLC;
2546 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2547 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
2549 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2550 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
2552 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2553 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
2556 impl FailHTLCContents for ([u8; 32], u16) {
2557 type Message = msgs::UpdateFailMalformedHTLC;
2558 fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
2559 msgs::UpdateFailMalformedHTLC {
2562 sha256_of_onion: self.0,
2563 failure_code: self.1
2566 fn to_inbound_htlc_state(self) -> InboundHTLCState {
2567 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
2569 fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
2570 HTLCUpdateAwaitingACK::FailMalformedHTLC {
2572 sha256_of_onion: self.0,
2573 failure_code: self.1
2578 trait FailHTLCMessageName {
2579 fn name() -> &'static str;
2581 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
2582 fn name() -> &'static str {
2583 "update_fail_htlc"
2586 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
2587 fn name() -> &'static str {
2588 "update_fail_malformed_htlc"
2592 impl<SP: Deref> Channel<SP> where
2593 SP::Target: SignerProvider,
2594 <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2596 fn check_remote_fee<F: Deref, L: Deref>(
2597 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2598 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2599 ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2601 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2602 ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2604 ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2606 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2607 if feerate_per_kw < lower_limit {
2608 if let Some(cur_feerate) = cur_feerate_per_kw {
2609 if feerate_per_kw > cur_feerate {
2611 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2612 cur_feerate, feerate_per_kw);
2616 return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
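// In short (illustrative restatement): a remote feerate below our estimator's
// minimum for this channel type is only tolerated when it is still an increase
// over a feerate we had already accepted; otherwise we close, since a feerate we
// cannot get confirmed endangers our ability to close the channel unilaterally.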
2622 fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2623 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2624 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2625 // outside of those situations will fail.
2626 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2630 fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2635 1 + // script length (0)
2639 )*4 + // * 4 for non-witness parts
2640 2 + // witness marker and flag
2641 1 + // witness element count
2642 4 + // 4 element lengths (2 sigs, multisig dummy, and witness script)
2643 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2644 2*(1 + 71); // two signatures + sighash type flags
2645 if let Some(spk) = a_scriptpubkey {
2646 ret += ((8+1) + // output values and script length
2647 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
2649 if let Some(spk) = b_scriptpubkey {
2650 ret += ((8+1) + // output values and script length
2651 spk.len() as u64) * 4; // scriptpubkey and witness multiplier
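// For intuition: per BIP 141, weight = non-witness bytes * 4 + witness bytes, so
// each output's value-plus-script costs (8 + 1 + script_len) * 4 weight while the
// funding input's witness (two ~71-byte signatures plus the 2-of-2 redeem script)
// is counted only once; passing `None` for a side's script models that side's
// output being omitted from the closing transaction.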
2657 fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2658 assert!(self.context.pending_inbound_htlcs.is_empty());
2659 assert!(self.context.pending_outbound_htlcs.is_empty());
2660 assert!(self.context.pending_update_fee.is_none());
2662 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2663 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2664 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2666 if value_to_holder < 0 {
2667 assert!(self.context.is_outbound());
2668 total_fee_satoshis += (-value_to_holder) as u64;
2669 } else if value_to_counterparty < 0 {
2670 assert!(!self.context.is_outbound());
2671 total_fee_satoshis += (-value_to_counterparty) as u64;
2674 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2675 value_to_counterparty = 0;
2678 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2679 value_to_holder = 0;
2682 assert!(self.context.shutdown_scriptpubkey.is_some());
2683 let holder_shutdown_script = self.get_closing_scriptpubkey();
2684 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2685 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2687 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2688 (closing_transaction, total_fee_satoshis)
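// For intuition (illustrative numbers): on a 100_000 sat channel where we are the
// funder with value_to_self_msat = 30_000_000 and a proposed fee of 1_000 sats,
// value_to_holder = 30_000 - 1_000 = 29_000 sats and value_to_counterparty =
// 70_000 sats; a side whose balance ends up at or below
// `holder_dust_limit_satoshis` (or a skipped remote output) is zeroed out and
// therefore dropped from the closing transaction.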
2691 fn funding_outpoint(&self) -> OutPoint {
2692 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2695 /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2698 /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2699 /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2701 /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2703 pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2704 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2705 where L::Target: Logger {
2706 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2707 // (see equivalent if condition there).
2708 assert!(!self.context.channel_state.can_generate_new_commitment());
2709 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2710 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2711 self.context.latest_monitor_update_id = mon_update_id;
2712 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2713 assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2717 fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2718 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2719 // caller thought we could have something claimed (because we wouldn't have accepted an
2720 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2722 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2723 panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2726 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2727 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2728 // these, but for now we just have to treat them as normal.
2730 let mut pending_idx = core::usize::MAX;
2731 let mut htlc_value_msat = 0;
2732 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2733 if htlc.htlc_id == htlc_id_arg {
2734 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2735 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2736 htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2738 InboundHTLCState::Committed => {},
2739 InboundHTLCState::LocalRemoved(ref reason) => {
2740 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2742 log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2743 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2745 return UpdateFulfillFetch::DuplicateClaim {};
2748 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2749 // Don't return in release mode here so that we can update channel_monitor
2753 htlc_value_msat = htlc.amount_msat;
2757 if pending_idx == core::usize::MAX {
2758 #[cfg(any(test, fuzzing))]
2759 // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2760 // that this is simply a duplicate claim, not a previously-failed HTLC on which we lost funds.
2761 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2762 return UpdateFulfillFetch::DuplicateClaim {};
2765 // Now update local state:
2767 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2768 // can claim it even if the channel hits the chain before we see their next commitment.
2769 self.context.latest_monitor_update_id += 1;
2770 let monitor_update = ChannelMonitorUpdate {
2771 update_id: self.context.latest_monitor_update_id,
2772 counterparty_node_id: Some(self.context.counterparty_node_id),
2773 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2774 payment_preimage: payment_preimage_arg.clone(),
2778 if !self.context.channel_state.can_generate_new_commitment() {
2779 // Note that this condition is the same as the assertion in
2780 // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2781 // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2782 // do not get into this branch.
2783 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2784 match pending_update {
2785 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2786 if htlc_id_arg == htlc_id {
2787 // Make sure we don't leave latest_monitor_update_id incremented here:
2788 self.context.latest_monitor_update_id -= 1;
2789 #[cfg(any(test, fuzzing))]
2790 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2791 return UpdateFulfillFetch::DuplicateClaim {};
2794 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2795 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2797 if htlc_id_arg == htlc_id {
2798 log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2799 // TODO: We may actually be able to switch to a fulfill here, though it's
2800 // rare enough it may not be worth the complexity burden.
2801 debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2802 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2808 log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2809 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2810 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2812 #[cfg(any(test, fuzzing))]
2813 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2814 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2816 #[cfg(any(test, fuzzing))]
2817 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2820 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2821 if let InboundHTLCState::Committed = htlc.state {
2823 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2824 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2826 log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2827 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2830 UpdateFulfillFetch::NewClaim {
2833 msg: Some(msgs::UpdateFulfillHTLC {
2834 channel_id: self.context.channel_id(),
2835 htlc_id: htlc_id_arg,
2836 payment_preimage: payment_preimage_arg,
2841 pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2842 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2843 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2844 UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2845 // Even if we aren't supposed to let new monitor updates with commitment state
2846 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2847 // matter what. Sadly, to push a new monitor update which flies before others
2848 // already queued, we have to insert it into the pending queue and update the
2849 // update_ids of all the following monitors.
2850 if release_cs_monitor && msg.is_some() {
2851 let mut additional_update = self.build_commitment_no_status_check(logger);
2852 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
2853 // update_ids to be strictly increasing by one, so decrement it here.
2854 self.context.latest_monitor_update_id = monitor_update.update_id;
2855 monitor_update.updates.append(&mut additional_update.updates);
2857 let new_mon_id = self.context.blocked_monitor_updates.get(0)
2858 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2859 monitor_update.update_id = new_mon_id;
2860 for held_update in self.context.blocked_monitor_updates.iter_mut() {
2861 held_update.update.update_id += 1;
2864 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2865 let update = self.build_commitment_no_status_check(logger);
2866 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2872 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2873 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2875 UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
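// For intuition (illustrative update_ids): if updates with ids [5, 6] are already
// blocked when a claim arrives, the freshly built preimage update takes over id 5
// and the held updates are renumbered to [6, 7], keeping ids strictly increasing
// while ensuring the preimage reaches the ChannelMonitor before the commitment
// updates that depend on it.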
2879 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2880 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2881 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2882 /// before we fail backwards.
2884 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2885 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2886 /// [`ChannelError::Ignore`].
2887 pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2888 -> Result<(), ChannelError> where L::Target: Logger {
2889 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2890 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2893 /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
2894 /// want to fail blinded HTLCs where we are not the intro node.
2896 /// See [`Self::queue_fail_htlc`] for more info.
2897 pub fn queue_fail_malformed_htlc<L: Deref>(
2898 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
2899 ) -> Result<(), ChannelError> where L::Target: Logger {
2900 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
2901 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2904 /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2905 /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2906 /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2907 /// before we fail backwards.
2909 /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2910 /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2911 /// [`ChannelError::Ignore`].
2912 fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
2913 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
2915 ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
2916 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2917 panic!("Was asked to fail an HTLC when channel was not in an operational state");
2920 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2921 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
2922 // these, but for now we just have to treat them as normal.
2924 let mut pending_idx = core::usize::MAX;
2925 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2926 if htlc.htlc_id == htlc_id_arg {
2928 InboundHTLCState::Committed => {},
2929 InboundHTLCState::LocalRemoved(ref reason) => {
2930 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2932 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2937 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2938 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2944 if pending_idx == core::usize::MAX {
2945 #[cfg(any(test, fuzzing))]
2946 // If we failed to find an HTLC to fail, make sure it was previously fulfilled and that
2947 // this is simply a duplicate fail, not a previously-failed HTLC that we failed back too early.
2948 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2952 if !self.context.channel_state.can_generate_new_commitment() {
2953 debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2954 force_holding_cell = true;
2957 // Now update local state:
2958 if force_holding_cell {
2959 for pending_update in self.context.holding_cell_htlc_updates.iter() {
2960 match pending_update {
2961 &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2962 if htlc_id_arg == htlc_id {
2963 #[cfg(any(test, fuzzing))]
2964 debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2968 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2969 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2971 if htlc_id_arg == htlc_id {
2972 debug_assert!(false, "Tried to fail an HTLC that was already failed");
2973 return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2979 log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2980 self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
2984 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
2985 E::Message::name(), &self.context.channel_id());
2987 let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2988 htlc.state = err_contents.clone().to_inbound_htlc_state();
2991 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
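// Typical use (sketch): `queue_fail_htlc` and `queue_fail_malformed_htlc` call
// this with `force_holding_cell` set, so the failure is parked as an
// `HTLCUpdateAwaitingACK` and the wire message is only produced later;
// `free_holding_cell_htlcs` calls it with `force_holding_cell` false to obtain
// the `update_fail_htlc`/`update_fail_malformed_htlc` message directly.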
2994 // Message handlers:
2995 /// Updates the state of the channel to indicate that all channels in the batch have received
2996 /// funding_signed and persisted their monitors.
2997 /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2998 /// treated as a non-batch channel going forward.
2999 pub fn set_batch_ready(&mut self) {
3000 self.context.is_batch_funding = None;
3001 self.context.channel_state.clear_waiting_for_batch();
3004 /// Unsets the existing funding information.
3006 /// This must only be used if the channel has not yet completed funding and has not been used.
3008 /// Further, the channel must be immediately shut down after this with a call to
3009 /// [`ChannelContext::force_shutdown`].
3010 pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3011 debug_assert!(matches!(
3012 self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3014 self.context.channel_transaction_parameters.funding_outpoint = None;
3015 self.context.channel_id = temporary_channel_id;
3018 /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3019 /// and the channel is now usable (and public), this may generate an announcement_signatures to
3021 pub fn channel_ready<NS: Deref, L: Deref>(
3022 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3023 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3024 ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3026 NS::Target: NodeSigner,
3029 if self.context.channel_state.is_peer_disconnected() {
3030 self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3031 return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3034 if let Some(scid_alias) = msg.short_channel_id_alias {
3035 if Some(scid_alias) != self.context.short_channel_id {
3036 // The scid alias provided can be used to route payments *from* our counterparty,
3037 // i.e. can be used for inbound payments and provided in invoices, but is not used
3038 // when routing outbound payments.
3039 self.context.latest_inbound_scid_alias = Some(scid_alias);
3043 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
3044 // batch, but we can receive channel_ready messages.
3045 let mut check_reconnection = false;
3046 match &self.context.channel_state {
3047 ChannelState::AwaitingChannelReady(flags) => {
3048 let flags = *flags & !FundedStateFlags::ALL;
3049 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3050 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3051 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3052 check_reconnection = true;
3053 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3054 self.context.channel_state.set_their_channel_ready();
3055 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3056 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3057 self.context.update_time_counter += 1;
3059 // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3060 debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3063 // If we reconnected before sending our `channel_ready` they may still resend theirs.
3064 ChannelState::ChannelReady(_) => check_reconnection = true,
3065 _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3067 if check_reconnection {
3068 // They probably disconnected/reconnected and re-sent the channel_ready, which is
3069 // required, or they're sending a fresh SCID alias.
3070 let expected_point =
3071 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3072 // If they haven't ever sent an updated point, the point they send should match
3074 self.context.counterparty_cur_commitment_point
3075 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3076 // If we've advanced the commitment number once, the second commitment point is
3077 // at `counterparty_prev_commitment_point`, which is not yet revoked.
3078 debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3079 self.context.counterparty_prev_commitment_point
3081 // If they have sent updated points, channel_ready is always supposed to match
3082 // their "first" point, which we re-derive here.
3083 Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3084 &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3085 ).expect("We already advanced, so previous secret keys should have been validated already")))
3087 if expected_point != Some(msg.next_per_commitment_point) {
3088 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3093 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3094 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3096 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3098 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
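// For intuition, the three `expected_point` cases above: (a) no commitment
// updates yet, so their current point must be echoed back; (b) exactly one
// update, so the not-yet-revoked previous point applies; (c) two or more
// updates, in which case their first point is re-derived from the first revealed
// per-commitment secret.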
3101 pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3102 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3103 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3104 ) -> Result<(), ChannelError>
3105 where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3106 FE::Target: FeeEstimator, L::Target: Logger,
3108 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3109 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3111 // We can't accept HTLCs sent after we've sent a shutdown.
3112 if self.context.channel_state.is_local_shutdown_sent() {
3113 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3115 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3116 if self.context.channel_state.is_remote_shutdown_sent() {
3117 return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3119 if self.context.channel_state.is_peer_disconnected() {
3120 return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3122 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3123 return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3125 if msg.amount_msat == 0 {
3126 return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3128 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3129 return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3132 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3133 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3134 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3135 return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3137 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3138 return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3141 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3142 // the reserve_satoshis we told them to always have as direct payment so that they lose
3143 // something if we punish them for broadcasting an old state).
3144 // Note that we don't really care about having a small/no to_remote output in our local
3145 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3146 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3147 // present in the next commitment transaction we send them (at least for fulfilled ones,
3148 // failed ones won't modify value_to_self).
3149 // Note that we will send HTLCs which another instance of rust-lightning would think
3150 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3151 // Channel state once they will not be present in the next received commitment
3153 let mut removed_outbound_total_msat = 0;
3154 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3155 if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3156 removed_outbound_total_msat += htlc.amount_msat;
3157 } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3158 removed_outbound_total_msat += htlc.amount_msat;
3162 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3163 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3166 let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3167 (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3168 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3170 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3171 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3172 let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3173 if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3174 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3175 on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3176 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3180 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3181 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3182 let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3183 if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3184 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3185 on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3186 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3190 let pending_value_to_self_msat =
3191 self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3192 let pending_remote_value_msat =
3193 self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3194 if pending_remote_value_msat < msg.amount_msat {
3195 return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3198 // Check that the remote can afford to pay for this HTLC on-chain at the current
3199 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3201 let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3202 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3203 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3205 let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3206 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3210 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3211 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3213 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3214 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3218 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3219 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3223 if !self.context.is_outbound() {
3224 // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3225 // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3226 // side, only on the sender's. Note that with anchor outputs we are no longer as
3227 // sensitive to fee spikes, so we skip the fee spike buffer multiple below (while still accounting for the anchor outputs' value).
3228 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3229 let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3230 if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3231 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3233 if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3234 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3235 // the HTLC, i.e. its status is already set to failing.
3236 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3237 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3240 // Check that they won't violate our local required channel reserve by adding this HTLC.
3241 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3242 let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3243 if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3244 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3247 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3248 return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3250 if msg.cltv_expiry >= 500000000 {
3251 return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3254 if self.context.channel_state.is_local_shutdown_sent() {
3255 if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3256 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3260 // Now update local state:
3261 self.context.next_counterparty_htlc_id += 1;
3262 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3263 htlc_id: msg.htlc_id,
3264 amount_msat: msg.amount_msat,
3265 payment_hash: msg.payment_hash,
3266 cltv_expiry: msg.cltv_expiry,
3267 state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
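// Dust-exposure intuition for the checks above (illustrative numbers): on a
// non-anchor channel with a dust buffer feerate of 5_000 sat/kW and an HTLC
// timeout tx weight of ~663, an inbound HTLC under roughly
// 5_000 * 663 / 1000 + counterparty_dust_limit_satoshis sats is dust on the
// counterparty's commitment tx; each such HTLC counts toward
// `max_dust_htlc_exposure_msat`, and once that budget would be exceeded the HTLC
// is failed back with 0x1000|7 (temporary_channel_failure) rather than risking
// unclaimable dust on-chain.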
3272 /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3274 fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3275 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3276 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3277 if htlc.htlc_id == htlc_id {
3278 let outcome = match check_preimage {
3279 None => fail_reason.into(),
3280 Some(payment_preimage) => {
3281 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3282 if payment_hash != htlc.payment_hash {
3283 return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3285 OutboundHTLCOutcome::Success(Some(payment_preimage))
3289 OutboundHTLCState::LocalAnnounced(_) =>
3290 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3291 OutboundHTLCState::Committed => {
3292 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3294 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3295 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3300 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3303 pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3304 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3305 return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3307 if self.context.channel_state.is_peer_disconnected() {
3308 return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3311 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3314 pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3315 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3316 return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3318 if self.context.channel_state.is_peer_disconnected() {
3319 return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3322 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3326 pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3327 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3328 return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3330 if self.context.channel_state.is_peer_disconnected() {
3331 return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3334 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3338 pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3339 where L::Target: Logger
3341 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3342 return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3344 if self.context.channel_state.is_peer_disconnected() {
3345 return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3347 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3348 return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3351 let funding_script = self.context.get_funding_redeemscript();
3353 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3355 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3356 let commitment_txid = {
3357 let trusted_tx = commitment_stats.tx.trust();
3358 let bitcoin_tx = trusted_tx.built_transaction();
3359 let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3361 log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3362 log_bytes!(msg.signature.serialize_compact()[..]),
3363 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3364 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3365 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3366 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3370 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3372 // If our counterparty updated the channel fee in this commitment transaction, check that
3373 // they can actually afford the new fee now.
3374 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3375 update_state == FeeUpdateState::RemoteAnnounced
3378 debug_assert!(!self.context.is_outbound());
3379 let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3380 if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3381 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3384 #[cfg(any(test, fuzzing))]
3386 if self.context.is_outbound() {
3387 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3388 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3389 if let Some(info) = projected_commit_tx_info {
3390 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3391 + self.context.holding_cell_htlc_updates.len();
3392 if info.total_pending_htlcs == total_pending_htlcs
3393 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3394 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3395 && info.feerate == self.context.feerate_per_kw {
3396 assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3402 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3403 return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3406 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3407 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3408 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3409 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3410 // backwards compatibility, we never use it in production. To provide test coverage, we
3411 // randomly decide here (in test/fuzzing builds) to use the new vec sometimes.
3412 #[allow(unused_assignments, unused_mut)]
3413 let mut separate_nondust_htlc_sources = false;
3414 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3415 use core::hash::{BuildHasher, Hasher};
3416 // Get a random value using the only std API to do so - the DefaultHasher
3417 let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3418 separate_nondust_htlc_sources = rand_val % 2 == 0;
3421 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3422 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3423 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3424 if let Some(_) = htlc.transaction_output_index {
3425 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3426 self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3427 &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3429 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3430 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3431 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3432 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3433 log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3434 encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3435 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3436 return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3438 if !separate_nondust_htlc_sources {
3439 htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3442 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3444 if separate_nondust_htlc_sources {
3445 if let Some(source) = source_opt.take() {
3446 nondust_htlc_sources.push(source);
3449 debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3452 let holder_commitment_tx = HolderCommitmentTransaction::new(
3453 commitment_stats.tx,
3455 msg.htlc_signatures.clone(),
3456 &self.context.get_holder_pubkeys().funding_pubkey,
3457 self.context.counterparty_funding_pubkey()
3460 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3461 .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3463 // Update state now that we've passed all the can-fail calls...
3464 let mut need_commitment = false;
3465 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3466 if *update_state == FeeUpdateState::RemoteAnnounced {
3467 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3468 need_commitment = true;
3472 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3473 let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3474 Some(forward_info.clone())
3476 if let Some(forward_info) = new_forward {
3477 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3478 &htlc.payment_hash, &self.context.channel_id);
3479 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3480 need_commitment = true;
3483 let mut claimed_htlcs = Vec::new();
3484 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3485 if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3486 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3487 &htlc.payment_hash, &self.context.channel_id);
3488 // Grab the preimage, if it exists, instead of cloning
3489 let mut reason = OutboundHTLCOutcome::Success(None);
3490 mem::swap(outcome, &mut reason);
3491 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3492 // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3493 // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3494 // have a `Success(None)` reason. In this case we could forget some HTLC
3495 // claims, but such an upgrade is unlikely and including claimed HTLCs here
3496 // fixes a bug which the user was exposed to on 0.0.104 when they started the
3498 claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3500 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3501 need_commitment = true;
3505 self.context.latest_monitor_update_id += 1;
3506 let mut monitor_update = ChannelMonitorUpdate {
3507 update_id: self.context.latest_monitor_update_id,
3508 counterparty_node_id: Some(self.context.counterparty_node_id),
3509 updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3510 commitment_tx: holder_commitment_tx,
3511 htlc_outputs: htlcs_and_sigs,
3513 nondust_htlc_sources,
3517 self.context.cur_holder_commitment_transaction_number -= 1;
3518 self.context.expecting_peer_commitment_signed = false;
3519 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3520 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3521 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3523 if self.context.channel_state.is_monitor_update_in_progress() {
3524 // In case we initially failed monitor updating without requiring a response, we need
3525 // to make sure the RAA gets sent first.
3526 self.context.monitor_pending_revoke_and_ack = true;
3527 if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3528 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3529 // the corresponding HTLC status updates so that
3530 // get_last_commitment_update_for_send includes the right HTLCs.
3531 self.context.monitor_pending_commitment_signed = true;
3532 let mut additional_update = self.build_commitment_no_status_check(logger);
3533 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
3534 // update_ids to be strictly increasing by one, so decrement it here.
3535 self.context.latest_monitor_update_id = monitor_update.update_id;
3536 monitor_update.updates.append(&mut additional_update.updates);
3538 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3539 &self.context.channel_id);
3540 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3543 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3544 // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3545 // we'll send one right away when we get the revoke_and_ack when we
3546 // free_holding_cell_htlcs().
3547 let mut additional_update = self.build_commitment_no_status_check(logger);
3548 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
3549 // update_ids to be strictly increasing by one, so decrement it here.
3550 self.context.latest_monitor_update_id = monitor_update.update_id;
3551 monitor_update.updates.append(&mut additional_update.updates);
3555 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3556 &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3557 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3558 return Ok(self.push_ret_blockable_mon_update(monitor_update));
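// Flow summary: a valid commitment_signed advances
// cur_holder_commitment_transaction_number, records the new holder commitment
// (and HTLC signatures) in a single ChannelMonitorUpdate, and responds with a
// revoke_and_ack; when HTLC or fee state changed, our answering commitment_signed
// is folded into the same monitor update so both persist atomically, or is
// deferred via `monitor_pending_commitment_signed` while a monitor update is in
// flight.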
3561 /// Public version of the below, checking relevant preconditions first.
3562 /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3563 /// returns `(None, Vec::new())`.
3564 pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3565 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3566 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3567 where F::Target: FeeEstimator, L::Target: Logger
3569 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
3570 self.free_holding_cell_htlcs(fee_estimator, logger)
3571 } else { (None, Vec::new()) }
3574 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3575 /// for our counterparty.
3576 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3577 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3578 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3579 where F::Target: FeeEstimator, L::Target: Logger
3581 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3582 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3583 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3584 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3586 let mut monitor_update = ChannelMonitorUpdate {
3587 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3588 counterparty_node_id: Some(self.context.counterparty_node_id),
3589 updates: Vec::new(),
3592 let mut htlc_updates = Vec::new();
3593 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3594 let mut update_add_count = 0;
3595 let mut update_fulfill_count = 0;
3596 let mut update_fail_count = 0;
3597 let mut htlcs_to_fail = Vec::new();
3598 for htlc_update in htlc_updates.drain(..) {
3599 // Note that this *can* fail, though it should be due to rather-rare conditions on
3600 // fee races with adding too many outputs which push our total payments just over
3601 // the limit. In case it's less rare than I anticipate, we may want to revisit
3602 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3603 // to rebalance channels.
3604 let fail_htlc_res = match &htlc_update {
3605 &HTLCUpdateAwaitingACK::AddHTLC {
3606 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3607 skimmed_fee_msat, blinding_point, ..
3609 match self.send_htlc(
3610 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3611 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3613 Ok(_) => update_add_count += 1,
3616 ChannelError::Ignore(ref msg) => {
3617 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3618 // If we fail to send here, then this HTLC should
3619 // be failed backwards. Failing to send here
3620 // indicates that this HTLC may keep being put back
3621 // into the holding cell without ever being
3622 // successfully forwarded/failed/fulfilled, causing
3623 // our counterparty to eventually close on us.
3624 htlcs_to_fail.push((source.clone(), *payment_hash));
3627 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3634 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3635 // If an HTLC claim was previously added to the holding cell (via
3636 // `get_update_fulfill_htlc`), then generating the claim message itself must
3637 // not fail - any in between attempts to claim the HTLC will have resulted
3638 // in it hitting the holding cell again and we cannot change the state of a
3639 // holding cell HTLC from fulfill to anything else.
3640 let mut additional_monitor_update =
3641 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3642 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3643 { monitor_update } else { unreachable!() };
3644 update_fulfill_count += 1;
3645 monitor_update.updates.append(&mut additional_monitor_update.updates);
3648 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3649 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
3650 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3652 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3653 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
3654 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3657 if let Some(res) = fail_htlc_res {
3659 Ok(fail_msg_opt) => {
3660 // If an HTLC failure was previously added to the holding cell (via
3661 // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
3662 // not fail - we should never end up in a state where we double-fail
3663 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3664 // for a full revocation before failing.
3665 debug_assert!(fail_msg_opt.is_some());
3666 update_fail_count += 1;
3668 Err(ChannelError::Ignore(_)) => {},
3670 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3675 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3676 return (None, htlcs_to_fail);
3678 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3679 self.send_update_fee(feerate, false, fee_estimator, logger)
3684 let mut additional_update = self.build_commitment_no_status_check(logger);
3685 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3686 // but we want them to be strictly increasing by one, so reset it here.
3687 self.context.latest_monitor_update_id = monitor_update.update_id;
3688 monitor_update.updates.append(&mut additional_update.updates);
3690 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3691 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3692 update_add_count, update_fulfill_count, update_fail_count);
3694 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3695 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
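
	// Editor's note (illustrative, not part of the upstream API): several paths above and
	// below merge multiple `ChannelMonitorUpdate`s into one so that `update_id`s stay
	// strictly increasing by exactly one per monitor round-trip. A rough sketch of the
	// invariant, assuming hypothetical updates `a` and `b` where `b` was built after `a`:
	//
	//   debug_assert_eq!(b.update_id, a.update_id + 1);
	//   a.updates.append(&mut b.updates); // coalesce the steps into one update...
	//   // ...then pin the channel's counter back to the single merged update:
	//   // self.context.latest_monitor_update_id = a.update_id;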
	/// Handles receiving a remote's revoke_and_ack. Note that we may return a new
	/// commitment_signed message here in case we had pending outbound HTLCs to add which were
	/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
	/// generating an appropriate error *after* the channel state has been updated based on the
	/// revoke_and_ack message.
	pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
	) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
		}

		let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());

		if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
			if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
				return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
			}
		}

		if !self.context.channel_state.is_awaiting_remote_revoke() {
			// Our counterparty seems to have burned their coins to us (by revoking a state when we
			// haven't given them a new commitment transaction to broadcast). We should probably
			// take advantage of this by updating our channel monitor, sending them an error, and
			// waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
			// lot of work, and there's some chance this is all a misunderstanding anyway.
			// We have to do *something*, though, since our signer may get mad at us for otherwise
			// jumping a remote commitment number, so best to just force-close and move on.
			return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
		}

		#[cfg(any(test, fuzzing))]
		{
			*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
			*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
		}

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				ecdsa.validate_counterparty_revocation(
					self.context.cur_counterparty_commitment_transaction_number + 1,
					&secret
				).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		};

		self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
			.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
				idx: self.context.cur_counterparty_commitment_transaction_number + 1,
				secret: msg.per_commitment_secret,
			}],
		};

		// Update state now that we've passed all the can-fail calls...
		// (note that we may still fail to generate the new commitment_signed message, but that's
		// OK, we step the channel here and *then* if the new generation fails we can fail the
		// channel based on that, but stepping stuff here should be safe either way).
		self.context.channel_state.clear_awaiting_remote_revoke();
		self.context.sent_message_awaiting_response = None;
		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
		self.context.cur_counterparty_commitment_transaction_number -= 1;

		if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
		}

		log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
		let mut to_forward_infos = Vec::new();
		let mut revoked_htlcs = Vec::new();
		let mut finalized_claimed_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();
		let mut require_commitment = false;
		let mut value_to_self_msat_diff: i64 = 0;

		{
			// Take references explicitly so that we can hold multiple references to self.context.
			let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
			let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
			let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;

			// We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
			pending_inbound_htlcs.retain(|htlc| {
				if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
					log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
					if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						value_to_self_msat_diff += htlc.amount_msat as i64;
					}
					*expecting_peer_commitment_signed = true;
					false
				} else { true }
			});
			pending_outbound_htlcs.retain(|htlc| {
				if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
					log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
					if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
						revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
					} else {
						finalized_claimed_htlcs.push(htlc.source.clone());
						// They fulfilled, so we sent them money
						value_to_self_msat_diff -= htlc.amount_msat as i64;
					}
					false
				} else { true }
			});
			for htlc in pending_inbound_htlcs.iter_mut() {
				let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
					true
				} else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
					true
				} else { false };
				if swap {
					let mut state = InboundHTLCState::Committed;
					mem::swap(&mut state, &mut htlc.state);

					if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
						log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
						htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
						require_commitment = true;
					} else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
						match forward_info {
							PendingHTLCStatus::Fail(fail_msg) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
								require_commitment = true;
								match fail_msg {
									HTLCFailureMsg::Relay(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
										update_fail_htlcs.push(msg)
									},
									HTLCFailureMsg::Malformed(msg) => {
										htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
										update_fail_malformed_htlcs.push(msg)
									},
								}
							},
							PendingHTLCStatus::Forward(forward_info) => {
								log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
								to_forward_infos.push((forward_info, htlc.htlc_id));
								htlc.state = InboundHTLCState::Committed;
							}
						}
					}
				}
			}
			for htlc in pending_outbound_htlcs.iter_mut() {
				if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
					log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
					htlc.state = OutboundHTLCState::Committed;
					*expecting_peer_commitment_signed = true;
				}
				if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
					log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
					// Grab the preimage, if it exists, instead of cloning
					let mut reason = OutboundHTLCOutcome::Success(None);
					mem::swap(outcome, &mut reason);
					htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
					require_commitment = true;
				}
			}
		}
		self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;

		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			match update_state {
				FeeUpdateState::Outbound => {
					debug_assert!(self.context.is_outbound());
					log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
					self.context.expecting_peer_commitment_signed = true;
				},
				FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
				FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
					debug_assert!(!self.context.is_outbound());
					log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
					require_commitment = true;
					self.context.feerate_per_kw = feerate;
					self.context.pending_update_fee = None;
				},
			}
		}

		let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
		let release_state_str =
			if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
		macro_rules! return_with_htlcs_to_fail {
			($htlcs_to_fail: expr) => {
				if !release_monitor {
					self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
						update: monitor_update,
					});
					return Ok(($htlcs_to_fail, None));
				} else {
					return Ok(($htlcs_to_fail, Some(monitor_update)));
				}
			}
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			// We can't actually generate a new commitment transaction (incl by freeing holding
			// cells) while we can't update the monitor, so we just return what we have.
			if require_commitment {
				self.context.monitor_pending_commitment_signed = true;
				// When the monitor updating is restored we'll call
				// get_last_commitment_update_for_send(), which does not update state, but we're
				// definitely now awaiting a remote revoke before we can step forward any more, so
				// set it here.
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			self.context.monitor_pending_forwards.append(&mut to_forward_infos);
			self.context.monitor_pending_failures.append(&mut revoked_htlcs);
			self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
			log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
			return_with_htlcs_to_fail!(Vec::new());
		}

		match self.free_holding_cell_htlcs(fee_estimator, logger) {
			(Some(mut additional_update), htlcs_to_fail) => {
				// free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);

				log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
					&self.context.channel_id(), release_state_str);

				self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
				return_with_htlcs_to_fail!(htlcs_to_fail);
			},
			(None, htlcs_to_fail) => {
				if require_commitment {
					let mut additional_update = self.build_commitment_no_status_check(logger);

					// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
					// strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);

					log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
						&self.context.channel_id(),
						update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
						release_state_str);

					self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				} else {
					log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
						&self.context.channel_id(), release_state_str);

					self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
					return_with_htlcs_to_fail!(htlcs_to_fail);
				}
			}
		}
	}
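
	// Editor's note (illustrative summary, not upstream documentation): on a received
	// revoke_and_ack the HTLC state machines above step roughly as follows:
	//
	//   inbound:  AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke
	//             AwaitingAnnouncedRemoteRevoke  -> Committed (forward) or LocalRemoved (fail)
	//             LocalRemoved                   -> (removed; value credited if fulfilled)
	//   outbound: LocalAnnounced                 -> Committed
	//             AwaitingRemoteRevokeToRemove   -> AwaitingRemovedRemoteRevoke
	//
	// Any promotion that changes what the counterparty must next sign sets
	// `require_commitment`, which drives the commitment_signed regeneration above.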
	/// Queues up an outbound update fee by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
		assert!(msg_opt.is_none(), "We forced holding cell?");
	}
	/// Adds a pending update to this channel. See the doc for send_htlc for
	/// further details on the optionality of the return value.
	/// If our balance is too low to cover the cost of the next commitment transaction at the
	/// new feerate, the update is cancelled.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
	/// [`Channel`] if `force_holding_cell` is false.
	fn send_update_fee<F: Deref, L: Deref>(
		&mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Option<msgs::UpdateFee>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !self.context.is_outbound() {
			panic!("Cannot send fee from inbound channel");
		}
		if !self.context.is_usable() {
			panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
		}
		if !self.context.is_live() {
			panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
		}

		// Before proposing a feerate update, check that we can actually afford the new fee.
		let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
		let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
		let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}
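
	// Editor's note: a worked example (made-up numbers) of the affordability check above
	// on a non-anchor channel. Per BOLT 3 the commitment weight is roughly 724 WU plus
	// 172 WU per non-dust HTLC, so with 3 non-dust HTLCs at a proposed feerate of
	// 1_000 sat/kWU:
	//
	//   commit_tx_fee_sat = 1_000 * (724 + 3 * 172) / 1_000 = 1_240 sats
	//
	// plus headroom for CONCURRENT_INBOUND_HTLC_FEE_BUFFER additional HTLCs; the
	// update_fee is only sent if our balance covers that buffered fee *and* the
	// counterparty-selected channel reserve.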
	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	///
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	///
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_pre_funded_state() {
			return Err(())
		}

		if self.context.channel_state.is_peer_disconnected() {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly.
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state.set_peer_disconnected();
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}
	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
	/// update completes (potentially immediately).
	/// The messages which were generated with the monitor update must *not* have been sent to the
	/// remote end, and must instead have been dropped. They will be regenerated when
	/// [`Self::monitor_updating_restored`] is called.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	/// [`chain::Watch`]: crate::chain::Watch
	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.context.monitor_pending_revoke_and_ack |= resend_raa;
		self.context.monitor_pending_commitment_signed |= resend_commitment;
		self.context.monitor_pending_channel_ready |= resend_channel_ready;
		self.context.monitor_pending_forwards.append(&mut pending_forwards);
		self.context.monitor_pending_failures.append(&mut pending_fails);
		self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.context.channel_state.set_monitor_update_in_progress();
	}
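
	// Editor's note (illustrative): the pause/restore pair acts like a latch. Each
	// `monitor_updating_paused(...)` call ORs in what must later be replayed; a
	// hypothetical caller sequence might look like:
	//
	//   chan.monitor_updating_paused(true, false, false, vec![], vec![], vec![]);
	//   // ...monitor update persisted out-of-band...
	//   let updates = chan.monitor_updating_restored(&logger, &signer, chain_hash, &cfg, height);
	//   // `updates.raa` now carries the revoke_and_ack we deferred above.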
	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		assert!(self.context.channel_state.is_monitor_update_in_progress());
		self.context.channel_state.clear_monitor_update_in_progress();

		// If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() &&
				(matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
				matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
			{
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (ie we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);

		if self.context.channel_state.is_peer_disconnected() {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
		}
	}
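
	// Editor's note (illustrative): `order` matters on the wire when we owe the peer both
	// messages. A hypothetical sender loop would honor it roughly like so:
	//
	//   match updates.order {
	//       RAACommitmentOrder::RevokeAndACKFirst => { /* send raa, then commitment_update */ }
	//       RAACommitmentOrder::CommitmentFirst => { /* send commitment_update, then raa */ }
	//   }
	//
	// Replaying them in the wrong order would desynchronize the commitment state machines.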
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// Check that we won't be pushed over our dust exposure limit by the feerate increase.
		if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
			let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
			let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
			let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
			let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
			let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
			if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
					msg.feerate_per_kw, holder_tx_dust_exposure)));
			}
			if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
				return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
					msg.feerate_per_kw, counterparty_tx_dust_exposure)));
			}
		}
		Ok(())
	}
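
	// Editor's note: a toy example (made-up numbers) of the dust-exposure guard above.
	// Suppose `get_max_dust_htlc_exposure_msat` returns 5_000_000 msat and the proposed
	// feerate pushes four 1_200_000-msat HTLCs under the trimmed-to-dust threshold on our
	// commitment: exposure becomes 4 * 1_200_000 = 4_800_000 msat and the update_fee is
	// accepted, while a fifth such HTLC would take it to 6_000_000 msat and trip the
	// Close error above instead.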
	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// stuck.
	#[cfg(async_signing)]
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
		let commitment_update = if self.context.signer_pending_commitment_update {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
			self.context.get_funding_signed_msg(logger).1
		} else { None };
		let channel_ready = if funding_signed.is_some() {
			self.check_get_channel_ready(0)
		} else { None };

		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
			if commitment_update.is_some() { "a" } else { "no" },
			if funding_signed.is_some() { "a" } else { "no" },
			if channel_ready.is_some() { "a" } else { "no" });

		SignerResumeUpdates {
			commitment_update,
			funding_signed,
			channel_ready,
		}
	}
	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
		msgs::RevokeAndACK {
			channel_id: self.context.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}
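
	// Editor's note (illustrative): commitment transaction numbers count *down* from
	// INITIAL_COMMITMENT_NUMBER (2^48 - 1). If `cur_holder_commitment_transaction_number`
	// is N, then N + 1 is roughly the current (just-signed) state and N + 2 the state
	// being revoked, hence `release_commitment_secret(N + 2)` paired with the
	// per-commitment point for N as the "next" point the peer should sign against.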
	/// Gets the last commitment update for immediate sending to our peer.
	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();

		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
					skimmed_fee_msat: htlc.skimmed_fee_msat,
					blinding_point: htlc.blinding_point,
				});
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.context.channel_id(),
				feerate_per_kw: self.context.pending_update_fee.unwrap().0,
			})
		} else { None };

		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
			&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
			update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
			if self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = false;
			}
			update
		} else {
			#[cfg(not(async_signing))] {
				panic!("Failed to get signature for new commitment state");
			}
			#[cfg(async_signing)] {
				if !self.context.signer_pending_commitment_update {
					log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
					self.context.signer_pending_commitment_update = true;
				}
				return Err(());
			}
		};
		Ok(msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed,
		})
	}
	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state.is_local_shutdown_sent() {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}
	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
	pub fn channel_reestablish<L: Deref, NS: Deref>(
		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
	) -> Result<ReestablishResponses, ChannelError>
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		if !self.context.channel_state.is_peer_disconnected() {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
		}

		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
		if msg.next_remote_commitment_number > 0 {
			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
			}
			if msg.next_remote_commitment_number > our_commitment_transaction {
				macro_rules! log_and_panic {
					($err_msg: expr) => {
						log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
						panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					}
				}
				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number, if yes we send a warning message.
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(ChannelError::Warn(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.context.channel_state.clear_peer_disconnected();
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);

		if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if !self.context.channel_state.is_our_channel_ready() ||
					self.context.channel_state.is_monitor_update_in_progress() {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.context.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				}),
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			debug_assert!(false, "All values should have been handled in the four cases above");
			return Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
		if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
			self.mark_awaiting_response();
		}
		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };

		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
			}

			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: None,
				order: self.context.resend_order.clone(),
			})
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
			}

			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None,
					order: self.context.resend_order.clone(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
					order: self.context.resend_order.clone(),
				})
			}
		} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		} else {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		}
	}
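
	// Editor's note: a worked example (made-up numbers) of the reconciliation above.
	// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER, so if we have signed
	// five holder commitments, `our_commitment_transaction` is 5. A peer reporting
	// `next_remote_commitment_number == 4` lost only our last revoke_and_ack and gets it
	// resent; `== 5` means no loss; anything above 5 means *we* are behind and the
	// fallen-behind panic path above fires instead.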
	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// a very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}
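
	// Editor's note: a quick worked example (made-up numbers) of the limits above for an
	// outbound channel. With tx_weight = 700 WU, a minimum feerate of 250 sat/kWU, a
	// normal feerate of 2_000 sat/kWU, and force_close_avoidance_max_fee_satoshis = 1_000:
	//
	//   min fee = 250 * 700 / 1000           = 175 sats
	//   max fee = 2_000 * 700 / 1000 + 1_000 = 2_400 sats
	//
	// For an inbound (non-funder) channel the max is instead everything beyond our own
	// balance, since the funder pays the closing fee.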
	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.context.closing_negotiation_ready()
	}

	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.context.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.context.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
		// that closing_negotiation_ready checks this case (as well as a few others).
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
		if self.context.expecting_peer_commitment_signed {
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	// Marks a channel as waiting for a response from the counterparty. If it's not received
	// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
	// a reconnection.
	fn mark_awaiting_response(&mut self) {
		self.context.sent_message_awaiting_response = Some(0);
	}

	/// Determines whether we should disconnect the counterparty due to not receiving a response
	/// within our expected timeframe.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
		let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
			ticks_elapsed
		} else {
			// Don't disconnect when we're not waiting on a response.
			return false;
		};
		*ticks_elapsed += 1;
		*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	}
	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_pre_funded_state() {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state.set_remote_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
			self.push_ret_blockable_mon_update(monitor_update)
		} else { None };
		let shutdown = if send_shutdown {
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None };

		// We can't send our shutdown until we've committed all of our pending HTLCs, but the
		// remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
		// cell HTLCs and return them to fail the payment.
		self.context.holding_cell_update_fee = None;
		let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
					dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
					false
				},
				_ => true
			}
		});

		self.context.channel_state.set_local_shutdown_sent();
		self.context.update_time_counter += 1;

		Ok((shutdown, monitor_update, dropped_outbound_htlcs))
	}
4857 fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4858 let mut tx = closing_tx.trust().built_transaction().clone();
4860 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4862 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4863 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4864 let mut holder_sig = sig.serialize_der().to_vec();
4865 holder_sig.push(EcdsaSighashType::All as u8);
4866 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4867 cp_sig.push(EcdsaSighashType::All as u8);
4868 if funding_key[..] < counterparty_funding_key[..] {
4869 tx.input[0].witness.push(holder_sig);
4870 tx.input[0].witness.push(cp_sig);
4872 tx.input[0].witness.push(cp_sig);
4873 tx.input[0].witness.push(holder_sig);
4876 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
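// Worked example of the ordering above (BOLT-3): the funding redeemscript is
// `2 <pubkey1> <pubkey2> 2 OP_CHECKMULTISIG` with the pubkeys sorted by their
// serialized compressed form, so the signatures must be pushed in matching order.
// If our funding key serializes as 02aa... and theirs as 03bb..., ours sorts first
// and the final witness stack is:
//   [ <empty (CHECKMULTISIG dummy)>, <holder_sig>, <cp_sig>, <redeemscript> ]
// with the two signatures swapped when their key sorts first.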
4880 pub fn closing_signed<F: Deref>(
4881 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4882 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4883 where F::Target: FeeEstimator
4885 if !self.context.channel_state.is_both_sides_shutdown() {
4886 return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4888 if self.context.channel_state.is_peer_disconnected() {
4889 return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4891 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4892 return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4894 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4895 return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4898 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4899 return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4902 if self.context.channel_state.is_monitor_update_in_progress() {
4903 self.context.pending_counterparty_closing_signed = Some(msg.clone());
4904 return Ok((None, None, None));
4907 let funding_redeemscript = self.context.get_funding_redeemscript();
4908 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4909 if used_total_fee != msg.fee_satoshis {
4910 return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4912 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4914 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4917 // The remote end may have decided to revoke their output due to inconsistent dust
4918 // limits, so check for that case by re-checking the signature here.
4919 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4920 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4921 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4925 for outp in closing_tx.trust().built_transaction().output.iter() {
4926 if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4927 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4931 assert!(self.context.shutdown_scriptpubkey.is_some());
4932 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4933 if last_fee == msg.fee_satoshis {
4934 let shutdown_result = ShutdownResult {
4935 closure_reason: ClosureReason::CooperativeClosure,
4936 monitor_update: None,
4937 dropped_outbound_htlcs: Vec::new(),
4938 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4939 channel_id: self.context.channel_id,
4940 user_channel_id: self.context.user_id,
4941 channel_capacity_satoshis: self.context.channel_value_satoshis,
4942 counterparty_node_id: self.context.counterparty_node_id,
4943 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4945 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4946 self.context.channel_state = ChannelState::ShutdownComplete;
4947 self.context.update_time_counter += 1;
4948 return Ok((None, Some(tx), Some(shutdown_result)));
4952 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4954 macro_rules! propose_fee {
4955 ($new_fee: expr) => {
4956 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4957 (closing_tx, $new_fee)
4959 self.build_closing_transaction($new_fee, false)
4962 return match &self.context.holder_signer {
4963 ChannelSignerType::Ecdsa(ecdsa) => {
4965 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4966 .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4967 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4968 let shutdown_result = ShutdownResult {
4969 closure_reason: ClosureReason::CooperativeClosure,
4970 monitor_update: None,
4971 dropped_outbound_htlcs: Vec::new(),
4972 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4973 channel_id: self.context.channel_id,
4974 user_channel_id: self.context.user_id,
4975 channel_capacity_satoshis: self.context.channel_value_satoshis,
4976 counterparty_node_id: self.context.counterparty_node_id,
4977 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
4979 self.context.channel_state = ChannelState::ShutdownComplete;
4980 self.context.update_time_counter += 1;
4981 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4982 (Some(tx), Some(shutdown_result))
4987 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4988 Ok((Some(msgs::ClosingSigned {
4989 channel_id: self.context.channel_id,
4990 fee_satoshis: used_fee,
4992 fee_range: Some(msgs::ClosingSignedFeeRange {
4993 min_fee_satoshis: our_min_fee,
4994 max_fee_satoshis: our_max_fee,
4996 }), signed_tx, shutdown_result))
4998 // TODO (taproot|arik)
5005 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5006 if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5007 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5009 if max_fee_satoshis < our_min_fee {
5010 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5012 if min_fee_satoshis > our_max_fee {
5013 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5016 if !self.context.is_outbound() {
5017 // They have to pay, so pick the highest fee in the overlapping range.
5018 // We should never set an upper bound aside from their full balance
5019 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5020 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5022 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5023 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5024 msg.fee_satoshis, our_min_fee, our_max_fee)));
5026 // The proposed fee is in our acceptable range, accept it and broadcast!
5027 propose_fee!(msg.fee_satoshis);
5030 // Old fee style negotiation. We don't bother to enforce whether they are complying
5031 // with the "making progress" requirements; we just comply and hope for the best.
5032 if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5033 if msg.fee_satoshis > last_fee {
5034 if msg.fee_satoshis < our_max_fee {
5035 propose_fee!(msg.fee_satoshis);
5036 } else if last_fee < our_max_fee {
5037 propose_fee!(our_max_fee);
5039 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5042 if msg.fee_satoshis > our_min_fee {
5043 propose_fee!(msg.fee_satoshis);
5044 } else if last_fee > our_min_fee {
5045 propose_fee!(our_min_fee);
5047 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5051 if msg.fee_satoshis < our_min_fee {
5052 propose_fee!(our_min_fee);
5053 } else if msg.fee_satoshis > our_max_fee {
5054 propose_fee!(our_max_fee);
5056 propose_fee!(msg.fee_satoshis);
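// Negotiation sketch for the legacy branches above (assumed values): suppose we
// last proposed 1_000 sat, our range is [600, 1_200] sat, and the peer counters
// with 800 sat. Since 800 <= 1_000 and 800 > our_min_fee, we hit
// `propose_fee!(msg.fee_satoshis)`; because the fee we propose equals theirs, the
// macro returns the fully-signed closing transaction and negotiation terminates.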
5062 fn internal_htlc_satisfies_config(
5063 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5064 ) -> Result<(), (&'static str, u16)> {
5065 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5066 .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5067 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5068 (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5070 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
5071 0x1000 | 12, // fee_insufficient
5074 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5076 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5077 0x1000 | 13, // incorrect_cltv_expiry
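// Worked example of the config checks above (assumed values): with
// forwarding_fee_base_msat = 1_000 and forwarding_fee_proportional_millionths = 100,
// forwarding amt_to_forward = 1_000_000 msat costs
//   fee = 1_000 + 1_000_000 * 100 / 1_000_000 = 1_100 msat,
// so the inbound HTLC must carry at least 1_001_100 msat or we fail with
// 0x1000 | 12 (fee_insufficient). Similarly, the inbound cltv_expiry must exceed
// outgoing_cltv_value by at least cltv_expiry_delta or we fail with 0x1000 | 13.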
5083 /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5084 /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5085 /// unsuccessful, falls back to the previous one if one exists.
5086 pub fn htlc_satisfies_config(
5087 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5088 ) -> Result<(), (&'static str, u16)> {
5089 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5091 if let Some(prev_config) = self.context.prev_config() {
5092 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5099 pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5100 self.context.cur_holder_commitment_transaction_number + 1
5103 pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5104 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5107 pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5108 self.context.cur_counterparty_commitment_transaction_number + 2
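// Numbering note (a sketch of the convention; see get_channel_reestablish below):
// commitment transaction numbers start at INITIAL_COMMITMENT_NUMBER (2^48 - 1) and
// count *down*, since each one doubles as the index used to derive that
// commitment's revocation secret. The getters above add 1 (or 2) back to report
// the "current" and "most recently revoked" views of those counters.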
5112 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5113 &self.context.holder_signer
5117 pub fn get_value_stat(&self) -> ChannelValueStat {
5119 value_to_self_msat: self.context.value_to_self_msat,
5120 channel_value_msat: self.context.channel_value_satoshis * 1000,
5121 channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5122 pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5123 pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5124 holding_cell_outbound_amount_msat: {
5126 for h in self.context.holding_cell_htlc_updates.iter() {
5128 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5136 counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5137 counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5141 /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5142 /// Allowed in any state (including after shutdown)
5143 pub fn is_awaiting_monitor_update(&self) -> bool {
5144 self.context.channel_state.is_monitor_update_in_progress()
5147 /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5148 pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5149 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5150 self.context.blocked_monitor_updates[0].update.update_id - 1
5153 /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
5154 /// further blocked monitor update exists after it.
5155 pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5156 if self.context.blocked_monitor_updates.is_empty() { return None; }
5157 Some((self.context.blocked_monitor_updates.remove(0).update,
5158 !self.context.blocked_monitor_updates.is_empty()))
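// Hypothetical drain loop (the real release path lives in ChannelManager): blocked
// updates must be handed to the persister strictly in order, e.g.
//
//   while let Some((update, _more_pending)) = chan.unblock_next_blocked_monitor_update() {
//       // pass `update` to chain::Watch::update_channel and await persistence
//   }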
5161 /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5162 /// immediately given to the user for persisting or `None` if it should be held as blocked.
5163 fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5164 -> Option<ChannelMonitorUpdate> {
5165 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5166 if !release_monitor {
5167 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5176 pub fn blocked_monitor_updates_pending(&self) -> usize {
5177 self.context.blocked_monitor_updates.len()
5180 /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5181 /// If the channel is outbound, this implies we have not yet broadcasted the funding
5182 /// transaction. If the channel is inbound, this implies simply that the channel has not advanced state.
5184 pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5185 if !self.is_awaiting_monitor_update() { return false; }
5187 self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5188 if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5190 // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5191 // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5192 debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5195 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5196 self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5197 // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5198 // waiting for the initial monitor persistence. Thus, we check if our commitment
5199 // transaction numbers have both been iterated only exactly once (for the
5200 // funding_signed), and we're awaiting monitor update.
5202 // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5203 // only way to get an awaiting-monitor-update state during initial funding is if the
5204 // initial monitor persistence is still pending).
5206 // Because deciding we're awaiting initial broadcast spuriously could result in
5207 // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5208 // we hard-assert here, even in production builds.
5209 if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5210 assert!(self.context.monitor_pending_channel_ready);
5211 assert_eq!(self.context.latest_monitor_update_id, 0);
5217 /// Returns true if our channel_ready has been sent
5218 pub fn is_our_channel_ready(&self) -> bool {
5219 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5220 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5223 /// Returns true if our peer has either initiated or agreed to shut down the channel.
5224 pub fn received_shutdown(&self) -> bool {
5225 self.context.channel_state.is_remote_shutdown_sent()
5228 /// Returns true if we either initiated or agreed to shut down the channel.
5229 pub fn sent_shutdown(&self) -> bool {
5230 self.context.channel_state.is_local_shutdown_sent()
5233 /// Returns true if this channel is fully shut down. True here implies that no further actions
5234 /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5235 /// will be handled appropriately by the chain monitor.
5236 pub fn is_shutdown(&self) -> bool {
5237 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5240 pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5241 self.context.channel_update_status
5244 pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5245 self.context.update_time_counter += 1;
5246 self.context.channel_update_status = status;
5249 fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5251 // * always when a new block/transactions are confirmed with the new height
5252 // * when funding is signed with a height of 0
5253 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5257 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5258 if funding_tx_confirmations <= 0 {
5259 self.context.funding_tx_confirmation_height = 0;
5262 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5266 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5267 // channel_ready yet.
5268 if self.context.signer_pending_funding {
5272 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5273 // channel_ready until the entire batch is ready.
5274 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5275 self.context.channel_state.set_our_channel_ready();
5277 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5278 self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5279 self.context.update_time_counter += 1;
5281 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5282 // We got a reorg but not enough to trigger a force close, just ignore.
5285 if self.context.funding_tx_confirmation_height != 0 &&
5286 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5288 // We should never see a funding transaction on-chain until we've received
5289 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5290 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5291 // however, may do this and we shouldn't treat it as a bug.
5292 #[cfg(not(fuzzing))]
5293 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5294 Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5295 self.context.channel_state.to_u32());
5297 // We got a reorg but not enough to trigger a force close, just ignore.
5301 if need_commitment_update {
5302 if !self.context.channel_state.is_monitor_update_in_progress() {
5303 if !self.context.channel_state.is_peer_disconnected() {
5304 let next_per_commitment_point =
5305 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5306 return Some(msgs::ChannelReady {
5307 channel_id: self.context.channel_id,
5308 next_per_commitment_point,
5309 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5313 self.context.monitor_pending_channel_ready = true;
5319 /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
5320 /// In the first case, we store the confirmation height and calculate the short channel id.
5321 /// In the second, we simply return an Err indicating we need to be force-closed now.
5322 pub fn transactions_confirmed<NS: Deref, L: Deref>(
5323 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5324 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5325 ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5327 NS::Target: NodeSigner,
5330 let mut msgs = (None, None);
5331 if let Some(funding_txo) = self.context.get_funding_txo() {
5332 for &(index_in_block, tx) in txdata.iter() {
5333 // Check if the transaction is the expected funding transaction, and if it is,
5334 // check that it pays the right amount to the right script.
5335 if self.context.funding_tx_confirmation_height == 0 {
5336 if tx.txid() == funding_txo.txid {
5337 let txo_idx = funding_txo.index as usize;
5338 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5339 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5340 if self.context.is_outbound() {
5341 // If we generated the funding transaction and it doesn't match what it
5342 // should, the client is really broken and we should just panic and
5343 // tell them off. That said, because hash collisions happen with high
5344 // probability in fuzzing mode, if we're fuzzing we just close the
5345 // channel and move on.
5346 #[cfg(not(fuzzing))]
5347 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5349 self.context.update_time_counter += 1;
5350 let err_reason = "funding tx had wrong script/value or output index";
5351 return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5353 if self.context.is_outbound() {
5354 if !tx.is_coin_base() {
5355 for input in tx.input.iter() {
5356 if input.witness.is_empty() {
5357 // We generated a malleable funding transaction, implying we've
5358 // just exposed ourselves to funds loss to our counterparty.
5359 #[cfg(not(fuzzing))]
5360 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5365 self.context.funding_tx_confirmation_height = height;
5366 self.context.funding_tx_confirmed_in = Some(*block_hash);
5367 self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5368 Ok(scid) => Some(scid),
5369 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
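// Example of the BOLT-7 short_channel_id packing done by scid_from_parts:
//   scid = (block_height << 40) | (tx_index << 16) | output_index
// so a funding output at height 800_000, transaction index 1_024, output 1 packs
// as (800_000 << 40) | (1_024 << 16) | 1. The panic above can only trip if a
// field overflows its 3-/3-/2-byte slot (height or tx index >= 2^24, vout >= 2^16).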
5372 // If this is a coinbase transaction and not a 0-conf channel,
5373 // we should update our minimum_depth to COINBASE_MATURITY (100) to handle coinbase maturity.
5374 if tx.is_coin_base() &&
5375 self.context.minimum_depth.unwrap_or(0) > 0 &&
5376 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5377 self.context.minimum_depth = Some(COINBASE_MATURITY);
5380 // If we allow 1-conf funding, we may need to check for channel_ready here and
5381 // send it immediately instead of waiting for a best_block_updated call (which
5382 // may have already happened for this block).
5383 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5384 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5385 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5386 msgs = (Some(channel_ready), announcement_sigs);
5389 for inp in tx.input.iter() {
5390 if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5391 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5392 return Err(ClosureReason::CommitmentTxConfirmed);
5400 /// When a new block is connected, we check the height of the block against outbound holding
5401 /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5402 /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5403 /// handled by the ChannelMonitor.
5405 /// If we return Err, the channel may have been closed, at which point the standard
5406 /// requirements apply - no calls may be made except those explicitly stated to be allowed
5409 /// May return some HTLCs (and their payment_hash) which have timed out and should be failed backwards.
5411 pub fn best_block_updated<NS: Deref, L: Deref>(
5412 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5413 node_signer: &NS, user_config: &UserConfig, logger: &L
5414 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5416 NS::Target: NodeSigner,
5419 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5422 fn do_best_block_updated<NS: Deref, L: Deref>(
5423 &mut self, height: u32, highest_header_time: u32,
5424 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5425 ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5427 NS::Target: NodeSigner,
5430 let mut timed_out_htlcs = Vec::new();
5431 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5432 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5434 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5435 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5437 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5438 if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5439 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5447 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5449 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5450 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5451 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5453 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5454 return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5457 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5458 self.context.channel_state.is_our_channel_ready() {
5459 let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5460 if self.context.funding_tx_confirmation_height == 0 {
5461 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5462 // zero if it has been reorged out; however, in either case our state flags
5463 // indicate we've already sent a channel_ready.
5464 funding_tx_confirmations = 0;
5467 // If we've sent channel_ready (or have both sent and received channel_ready), and
5468 // the funding transaction has become unconfirmed,
5469 // close the channel and hope we can get the latest state on chain (because presumably
5470 // the funding transaction is at least still in the mempool of most nodes).
5472 // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5473 // 0-conf channel, but not doing so may lead to the
5474 // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
5476 if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5477 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5478 self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5479 return Err(ClosureReason::ProcessingError { err: err_reason });
5481 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5482 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5483 log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5484 // If funding_tx_confirmed_in is unset, the channel must not be active
5485 assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5486 assert!(!self.context.channel_state.is_our_channel_ready());
5487 return Err(ClosureReason::FundingTimedOut);
5490 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5491 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5493 Ok((None, timed_out_htlcs, announcement_sigs))
5496 /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5497 /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5498 /// before the channel has reached channel_ready and we can just wait for more blocks.
5499 pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5500 if self.context.funding_tx_confirmation_height != 0 {
5501 // We handle the funding disconnection by calling best_block_updated with a height one
5502 // below where our funding was connected, implying a reorg back to conf_height - 1.
5503 let reorg_height = self.context.funding_tx_confirmation_height - 1;
5504 // We use the time field to bump the current time we set on channel updates if its
5505 // larger. If we don't know that time has moved forward, we can just set it to the last
5506 // time we saw and it will be ignored.
5507 let best_time = self.context.update_time_counter;
5508 match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
5509 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5510 assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5511 assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5512 assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5518 // We never learned about the funding confirmation anyway, just ignore
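// Sketch of the trick above: passing `funding_tx_confirmation_height - 1` as the
// best height makes do_best_block_updated compute funding_tx_confirmations == 0,
// which takes exactly the same "funding transaction was un-confirmed" closure path
// a real reorg would, so no dedicated unconfirmation codepath is needed.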
5523 // Methods to get unprompted messages to send to the remote end (or where we already returned
5524 // something in the handler for the message that prompted this message):
5526 /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5527 /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5528 /// directions). Should be used for both broadcasted announcements and in response to an
5529 /// AnnouncementSignatures message from the remote peer.
5531 /// Will only fail if we're not in a state where channel_announcement may be sent (including
5534 /// This will only return ChannelError::Ignore upon failure.
5536 /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5537 fn get_channel_announcement<NS: Deref>(
5538 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5539 ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5540 if !self.context.config.announced_channel {
5541 return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5543 if !self.context.is_usable() {
5544 return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5547 let short_channel_id = self.context.get_short_channel_id()
5548 .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5549 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5550 .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5551 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5552 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5554 let msg = msgs::UnsignedChannelAnnouncement {
5555 features: channelmanager::provided_channel_features(&user_config),
5558 node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5559 node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5560 bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5561 bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5562 excess_data: Vec::new(),
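// Ordering example (BOLT 7): node_id_1/bitcoin_key_1 always belong to whichever
// node id sorts lexicographically lower. If our node id serializes as 02aa... and
// the counterparty's as 03bb..., were_node_one is true and our keys occupy the
// *_1 slots; sign_channel_announcement below arranges the four signatures to match.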
5568 fn get_announcement_sigs<NS: Deref, L: Deref>(
5569 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5570 best_block_height: u32, logger: &L
5571 ) -> Option<msgs::AnnouncementSignatures>
5573 NS::Target: NodeSigner,
5576 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5580 if !self.context.is_usable() {
5584 if self.context.channel_state.is_peer_disconnected() {
5585 log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5589 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5593 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5594 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5597 log_trace!(logger, "{:?}", e);
5601 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5603 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5608 match &self.context.holder_signer {
5609 ChannelSignerType::Ecdsa(ecdsa) => {
5610 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5612 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5617 let short_channel_id = match self.context.get_short_channel_id() {
5619 None => return None,
5622 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5624 Some(msgs::AnnouncementSignatures {
5625 channel_id: self.context.channel_id(),
5627 node_signature: our_node_sig,
5628 bitcoin_signature: our_bitcoin_sig,
5631 // TODO (taproot|arik)
5637 /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are available.
5639 fn sign_channel_announcement<NS: Deref>(
5640 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5641 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5642 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5643 let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5644 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5645 let were_node_one = announcement.node_id_1 == our_node_key;
5647 let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5648 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5649 match &self.context.holder_signer {
5650 ChannelSignerType::Ecdsa(ecdsa) => {
5651 let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5652 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5653 Ok(msgs::ChannelAnnouncement {
5654 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5655 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5656 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5657 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5658 contents: announcement,
5661 // TODO (taproot|arik)
5666 Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5670 /// Processes an incoming announcement_signatures message, providing a fully-signed
5671 /// channel_announcement message which we can broadcast and storing our counterparty's
5672 /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5673 pub fn announcement_signatures<NS: Deref>(
5674 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5675 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5676 ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5677 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5679 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5681 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5682 return Err(ChannelError::Close(format!(
5683 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5684 &announcement, self.context.get_counterparty_node_id())));
5686 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5687 return Err(ChannelError::Close(format!(
5688 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5689 &announcement, self.context.counterparty_funding_pubkey())));
5692 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5693 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5694 return Err(ChannelError::Ignore(
5695 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5698 self.sign_channel_announcement(node_signer, announcement)
5701 /// Gets a signed channel_announcement for this channel, if we previously received an
5702 /// announcement_signatures from our counterparty.
5703 pub fn get_signed_channel_announcement<NS: Deref>(
5704 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5705 ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5706 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5709 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5711 Err(_) => return None,
5713 match self.sign_channel_announcement(node_signer, announcement) {
5714 Ok(res) => Some(res),
5719 /// May panic if called on a channel that wasn't immediately-previously
5720 /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5721 pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5722 assert!(self.context.channel_state.is_peer_disconnected());
5723 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5724 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5725 // current to_remote balances. However, it no longer has any use, and thus is now simply
5726 // set to a dummy (but valid, as required by the spec) public key.
5727 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5728 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5729 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5730 let mut pk = [2; 33]; pk[1] = 0xff;
5731 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5732 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5733 let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5734 log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5737 log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5740 self.mark_awaiting_response();
5741 msgs::ChannelReestablish {
5742 channel_id: self.context.channel_id(),
5743 // The protocol has two different commitment number concepts - the "commitment
5744 // transaction number", which starts from 0 and counts up, and the "revocation key
5745 // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5746 // commitment transaction numbers by the index which will be used to reveal the
5747 // revocation key for that commitment transaction, which means we have to convert them
5748 // to protocol-level commitment numbers here...
5750 // next_local_commitment_number is the next commitment_signed number we expect to
5751 // receive (indicating if they need to resend one that we missed).
5752 next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5753 // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5754 // receive, however we track it by the next commitment number for a remote transaction
5755 // (which is one further, as they always revoke previous commitment transaction, not
5756 // the one we send) so we have to decrement by 1. Note that if
5757 // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5758 // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5760 next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5761 your_last_per_commitment_secret: remote_last_secret,
5762 my_current_per_commitment_point: dummy_pubkey,
5763 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
5764 // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5765 // txid of that interactive transaction, else we MUST NOT set it.
5766 next_funding_txid: None,
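// Worked example of the conversion above (INITIAL_COMMITMENT_NUMBER = 2^48 - 1):
// right after funding_signed both counters sit at INITIAL_COMMITMENT_NUMBER - 1,
// giving next_local_commitment_number = 1, the first protocol-level
// commitment_signed we could be missing. After one further counterparty commitment
// has been revoked the counterparty counter is INITIAL_COMMITMENT_NUMBER - 2 and
// next_remote_commitment_number = 1, reflecting the extra -1 explained above.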
5771 // Send stuff to our remote peers:
5773 /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5774 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5775 /// commitment update.
5777 /// `Err`s will only be [`ChannelError::Ignore`].
5778 pub fn queue_add_htlc<F: Deref, L: Deref>(
5779 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5780 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5781 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5782 ) -> Result<(), ChannelError>
5783 where F::Target: FeeEstimator, L::Target: Logger
5786 .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5787 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5788 .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5790 if let ChannelError::Ignore(_) = err { /* fine */ }
5791 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5796 /// Adds a pending outbound HTLC to this channel. Note that you probably want
5797 /// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
5799 /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on the wire:
5801 /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5802 /// wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5804 /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5805 /// we may not yet have sent the previous commitment update messages and will need to
5806 /// regenerate them.
5808 /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5809 /// on this [`Channel`] if `force_holding_cell` is false.
5811 /// `Err`s will only be [`ChannelError::Ignore`].
5812 fn send_htlc<F: Deref, L: Deref>(
5813 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5814 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5815 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5816 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5817 ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5818 where F::Target: FeeEstimator, L::Target: Logger
5820 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5821 self.context.channel_state.is_local_shutdown_sent() ||
5822 self.context.channel_state.is_remote_shutdown_sent()
5824 return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5826 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5827 if amount_msat > channel_total_msat {
5828 return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5831 if amount_msat == 0 {
5832 return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5835 let available_balances = self.context.get_available_balances(fee_estimator);
5836 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5837 return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5838 available_balances.next_outbound_htlc_minimum_msat)));
5841 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5842 return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5843 available_balances.next_outbound_htlc_limit_msat)));
5846 if self.context.channel_state.is_peer_disconnected() {
5847 // Note that this should never really happen; if we're !is_live(), receipt of an
5848 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
5849 // the user to send directly into a !is_live() channel. However, if we
5850 // disconnected during the time the previous hop was doing the commitment dance we may
5851 // end up getting here after the forwarding delay. In any case, returning an
5852 // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5853 return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5856 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
5857 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5858 payment_hash, amount_msat,
5859 if force_holding_cell { "into holding cell" }
5860 else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5861 else { "to peer" });
5863 if need_holding_cell {
5864 force_holding_cell = true;
5867 // Now update local state:
5868 if force_holding_cell {
5869 self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5874 onion_routing_packet,
5881 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5882 htlc_id: self.context.next_holder_htlc_id,
5884 payment_hash: payment_hash.clone(),
5886 state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5892 let res = msgs::UpdateAddHTLC {
5893 channel_id: self.context.channel_id,
5894 htlc_id: self.context.next_holder_htlc_id,
5898 onion_routing_packet,
5902 self.context.next_holder_htlc_id += 1;
5907 fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5908 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5909 // We can upgrade the status of some HTLCs that are waiting on a commitment. Even if we
5910 // fail to generate this, we are still at least at a position where upgrading their status is acceptable.
5912 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5913 let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5914 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5916 if let Some(state) = new_state {
5917 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5921 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5922 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5923 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5924 // Grab the preimage, if it exists, instead of cloning
5925 let mut reason = OutboundHTLCOutcome::Success(None);
5926 mem::swap(outcome, &mut reason);
5927 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5930 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5931 if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5932 debug_assert!(!self.context.is_outbound());
5933 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5934 self.context.feerate_per_kw = feerate;
5935 self.context.pending_update_fee = None;
5938 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5940 let (mut htlcs_ref, counterparty_commitment_tx) =
5941 self.build_commitment_no_state_update(logger);
5942 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5943 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5944 htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5946 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5947 self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5950 self.context.latest_monitor_update_id += 1;
5951 let monitor_update = ChannelMonitorUpdate {
5952 update_id: self.context.latest_monitor_update_id,
5953 counterparty_node_id: Some(self.context.counterparty_node_id),
5954 updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5955 commitment_txid: counterparty_commitment_txid,
5956 htlc_outputs: htlcs.clone(),
5957 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5958 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5959 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5960 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5961 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5964 self.context.channel_state.set_awaiting_remote_revoke();
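// Sketch of why the update above must exist before we speak: the monitor has to be
// able to penalize the counterparty's *new* commitment transaction as soon as our
// signature for it could reach them, so LatestCounterpartyCommitmentTXInfo records
// its txid, HTLCs, and balances first, and the commitment_signed itself is only
// released once this ChannelMonitorUpdate has been persisted.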
5968 fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5969 -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5970 where L::Target: Logger
5972 let counterparty_keys = self.context.build_remote_transaction_keys();
5973 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5974 let counterparty_commitment_tx = commitment_stats.tx;
5976 #[cfg(any(test, fuzzing))]
5978 if !self.context.is_outbound() {
5979 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5980 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5981 if let Some(info) = projected_commit_tx_info {
5982 let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5983 if info.total_pending_htlcs == total_pending_htlcs
5984 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5985 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5986 && info.feerate == self.context.feerate_per_kw {
5987 let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5988 assert_eq!(actual_fee, info.fee);
5994 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5997 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5998 /// generation when we shouldn't change HTLC/channel state.
5999 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6000 // Get the fee tests from `build_commitment_no_state_update`
6001 #[cfg(any(test, fuzzing))]
6002 self.build_commitment_no_state_update(logger);
6004 let counterparty_keys = self.context.build_remote_transaction_keys();
6005 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6006 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6008 match &self.context.holder_signer {
6009 ChannelSignerType::Ecdsa(ecdsa) => {
6010 let (signature, htlc_signatures);
6013 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6014 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6018 let res = ecdsa.sign_counterparty_commitment(
6019 &commitment_stats.tx,
6020 commitment_stats.inbound_htlc_preimages,
6021 commitment_stats.outbound_htlc_preimages,
6022 &self.context.secp_ctx,
6023 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6024 signature = res.0;
6025 htlc_signatures = res.1;
6027 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6028 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6029 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6030 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6032 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6033 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6034 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6035 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6036 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6037 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6041 Ok((msgs::CommitmentSigned {
6042 channel_id: self.context.channel_id,
6046 partial_signature_with_nonce: None,
6047 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6049 // TODO (taproot|arik)
6055 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6056 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6058 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6059 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
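///
/// # Example
///
/// An illustrative sketch (not compiled), assuming a hypothetical funded `chan` plus
/// `payment_hash`, `source`, `onion_packet`, `fee_estimator`, and `logger` values from the
/// surrounding send path:
///
/// ```ignore
/// let monitor_update = chan.send_htlc_and_commit(
///     10_000,       // amount_msat (hypothetical)
///     payment_hash,
///     500_000,      // cltv_expiry (hypothetical)
///     source, onion_packet,
///     None,         // skimmed_fee_msat
///     &fee_estimator, &logger,
/// )?;
/// if let Some(update) = monitor_update {
///     // Persist `update` via the chain::Watch before the commitment_signed is released.
/// }
/// ```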
6060 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6061 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6062 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6063 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6064 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6065 where F::Target: FeeEstimator, L::Target: Logger
6067 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6068 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6069 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6072 let monitor_update = self.build_commitment_no_status_check(logger);
6073 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6074 Ok(self.push_ret_blockable_mon_update(monitor_update))
6080 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually happened.
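///
/// # Example
///
/// A sketch (not compiled), assuming a hypothetical `chan` and a received
/// `msg: msgs::ChannelUpdate` from the counterparty:
///
/// ```ignore
/// if chan.channel_update(&msg)? {
///     // Forwarding parameters (fee_base_msat, fee_proportional_millionths,
///     // cltv_expiry_delta) changed; future forwards through this channel use them.
/// }
/// ```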
6082 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6083 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6084 fee_base_msat: msg.contents.fee_base_msat,
6085 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6086 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6088 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6090 self.context.counterparty_forwarding_info = new_forwarding_info;
6096 /// Begins the shutdown process, getting a message for the remote peer and returning all
6097 /// holding cell HTLCs for payment failure.
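///
/// # Example
///
/// An illustrative flow (not compiled), assuming hypothetical `chan`, `signer_provider`,
/// and `their_features` values:
///
/// ```ignore
/// let (shutdown_msg, monitor_update, dropped_htlcs) =
///     chan.get_shutdown(&signer_provider, &their_features, None, None)?;
/// // Send `shutdown_msg` to the peer, persist `monitor_update` if one was generated, and
/// // fail each of `dropped_htlcs` backwards since the holding cell was drained.
/// ```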
6098 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6099 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6100 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6102 for htlc in self.context.pending_outbound_htlcs.iter() {
6103 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6104 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6107 if self.context.channel_state.is_local_shutdown_sent() {
6108 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6110 else if self.context.channel_state.is_remote_shutdown_sent() {
6111 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6113 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6114 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6116 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6117 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6118 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6121 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6122 Some(_) => false,
6123 None => {
6124 // use override shutdown script if provided
6125 let shutdown_scriptpubkey = match override_shutdown_script {
6126 Some(script) => script,
6128 // otherwise, use the shutdown scriptpubkey provided by the signer
6129 match signer_provider.get_shutdown_scriptpubkey() {
6130 Ok(scriptpubkey) => scriptpubkey,
6131 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6135 if !shutdown_scriptpubkey.is_compatible(their_features) {
6136 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6138 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6143 // From here on out, we may not fail!
6144 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6145 self.context.channel_state.set_local_shutdown_sent();
6146 self.context.update_time_counter += 1;
6148 let monitor_update = if update_shutdown_script {
6149 self.context.latest_monitor_update_id += 1;
6150 let monitor_update = ChannelMonitorUpdate {
6151 update_id: self.context.latest_monitor_update_id,
6152 counterparty_node_id: Some(self.context.counterparty_node_id),
6153 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6154 scriptpubkey: self.get_closing_scriptpubkey(),
6157 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6158 self.push_ret_blockable_mon_update(monitor_update)
6160 let shutdown = msgs::Shutdown {
6161 channel_id: self.context.channel_id,
6162 scriptpubkey: self.get_closing_scriptpubkey(),
6165 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6166 // our shutdown until we've committed all of the pending changes.
6167 self.context.holding_cell_update_fee = None;
6168 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6169 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6171 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6172 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6179 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6180 "we can't both complete shutdown and return a monitor update");
6182 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6185 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6186 self.context.holding_cell_htlc_updates.iter()
6187 .flat_map(|htlc_update| {
6189 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6190 => Some((source, payment_hash)),
6194 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6198 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6199 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6200 pub context: ChannelContext<SP>,
6201 pub unfunded_context: UnfundedChannelContext,
6204 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
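/// Creates a new outbound channel. A sketch (not compiled) of a typical call, assuming
/// hypothetical `fee_estimator`, `entropy_source`, `signer_provider`, and `config` values:
///
/// ```ignore
/// let chan = OutboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &their_features, 1_000_000 /* channel_value_satoshis */, 0 /* push_msat */,
///     42 /* user_id */, &config, best_block_height, outbound_scid_alias, None,
/// )?;
/// let open_channel_msg = chan.get_open_channel(chain_hash);
/// ```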
6205 pub fn new<ES: Deref, F: Deref>(
6206 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6207 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6208 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6209 ) -> Result<OutboundV1Channel<SP>, APIError>
6210 where ES::Target: EntropySource,
6211 F::Target: FeeEstimator
6213 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6214 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6215 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6216 let pubkeys = holder_signer.pubkeys().clone();
6218 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6219 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6221 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6222 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6224 let channel_value_msat = channel_value_satoshis * 1000;
6225 if push_msat > channel_value_msat {
6226 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6228 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6229 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6231 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6232 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6233 // Protocol-level safety check; this should never trip because of
6234 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6235 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve ({}) is below the implementation dust limit", holder_selected_channel_reserve_satoshis) });
6238 let channel_type = Self::get_initial_channel_type(&config, their_features);
6239 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6241 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6242 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6243 } else {
6244 (ConfirmationTarget::NonAnchorChannelFee, 0)
6246 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6248 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6249 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6250 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6251 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6254 let mut secp_ctx = Secp256k1::new();
6255 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6257 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6258 match signer_provider.get_shutdown_scriptpubkey() {
6259 Ok(scriptpubkey) => Some(scriptpubkey),
6260 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6264 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6265 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6266 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6270 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6271 Ok(script) => script,
6272 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6275 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6278 context: ChannelContext {
6281 config: LegacyChannelConfig {
6282 options: config.channel_config.clone(),
6283 announced_channel: config.channel_handshake_config.announced_channel,
6284 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6289 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6291 channel_id: temporary_channel_id,
6292 temporary_channel_id: Some(temporary_channel_id),
6293 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6294 announcement_sigs_state: AnnouncementSigsState::NotSent,
6296 channel_value_satoshis,
6298 latest_monitor_update_id: 0,
6300 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6301 shutdown_scriptpubkey,
6304 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6305 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6308 pending_inbound_htlcs: Vec::new(),
6309 pending_outbound_htlcs: Vec::new(),
6310 holding_cell_htlc_updates: Vec::new(),
6311 pending_update_fee: None,
6312 holding_cell_update_fee: None,
6313 next_holder_htlc_id: 0,
6314 next_counterparty_htlc_id: 0,
6315 update_time_counter: 1,
6317 resend_order: RAACommitmentOrder::CommitmentFirst,
6319 monitor_pending_channel_ready: false,
6320 monitor_pending_revoke_and_ack: false,
6321 monitor_pending_commitment_signed: false,
6322 monitor_pending_forwards: Vec::new(),
6323 monitor_pending_failures: Vec::new(),
6324 monitor_pending_finalized_fulfills: Vec::new(),
6326 signer_pending_commitment_update: false,
6327 signer_pending_funding: false,
6329 #[cfg(debug_assertions)]
6330 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6331 #[cfg(debug_assertions)]
6332 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6334 last_sent_closing_fee: None,
6335 pending_counterparty_closing_signed: None,
6336 expecting_peer_commitment_signed: false,
6337 closing_fee_limits: None,
6338 target_closing_feerate_sats_per_kw: None,
6340 funding_tx_confirmed_in: None,
6341 funding_tx_confirmation_height: 0,
6342 short_channel_id: None,
6343 channel_creation_height: current_chain_height,
6345 feerate_per_kw: commitment_feerate,
6346 counterparty_dust_limit_satoshis: 0,
6347 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6348 counterparty_max_htlc_value_in_flight_msat: 0,
6349 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6350 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6351 holder_selected_channel_reserve_satoshis,
6352 counterparty_htlc_minimum_msat: 0,
6353 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6354 counterparty_max_accepted_htlcs: 0,
6355 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6356 minimum_depth: None, // Filled in in accept_channel
6358 counterparty_forwarding_info: None,
6360 channel_transaction_parameters: ChannelTransactionParameters {
6361 holder_pubkeys: pubkeys,
6362 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6363 is_outbound_from_holder: true,
6364 counterparty_parameters: None,
6365 funding_outpoint: None,
6366 channel_type_features: channel_type.clone()
6368 funding_transaction: None,
6369 is_batch_funding: None,
6371 counterparty_cur_commitment_point: None,
6372 counterparty_prev_commitment_point: None,
6373 counterparty_node_id,
6375 counterparty_shutdown_scriptpubkey: None,
6377 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6379 channel_update_status: ChannelUpdateStatus::Enabled,
6380 closing_signed_in_flight: false,
6382 announcement_sigs: None,
6384 #[cfg(any(test, fuzzing))]
6385 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6386 #[cfg(any(test, fuzzing))]
6387 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6389 workaround_lnd_bug_4006: None,
6390 sent_message_awaiting_response: None,
6392 latest_inbound_scid_alias: None,
6393 outbound_scid_alias,
6395 channel_pending_event_emitted: false,
6396 channel_ready_event_emitted: false,
6398 #[cfg(any(test, fuzzing))]
6399 historical_inbound_htlc_fulfills: HashSet::new(),
6404 blocked_monitor_updates: Vec::new(),
6406 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6410 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6411 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6412 let counterparty_keys = self.context.build_remote_transaction_keys();
6413 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6414 let signature = match &self.context.holder_signer {
6415 // TODO (taproot|arik): move match into calling method for Taproot
6416 ChannelSignerType::Ecdsa(ecdsa) => {
6417 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6418 .map(|(sig, _)| sig).ok()?
6420 // TODO (taproot|arik)
6425 if self.context.signer_pending_funding {
6426 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6427 self.context.signer_pending_funding = false;
6430 Some(msgs::FundingCreated {
6431 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6432 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6433 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6436 partial_signature_with_nonce: None,
6438 next_local_nonce: None,
6442 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6443 /// a funding_created message for the remote peer.
6444 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6445 /// or if called on an inbound channel.
6446 /// Note that channel_id changes during this call!
6447 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6448 /// If an Err is returned, it is a ChannelError::Close.
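///
/// # Example
///
/// A hedged sketch (not compiled) of the outbound funding flow, assuming a hypothetical
/// `funding_tx`/`funding_txo` pair already built to pay the channel's funding script:
///
/// ```ignore
/// let funding_created = chan.get_funding_created(funding_tx, funding_txo, false, &logger)
///     .map_err(|(_chan, e)| e)?;
/// // Send `funding_created` (when Some) to the peer. Only broadcast the funding
/// // transaction after the peer's funding_signed has been validated by `funding_signed()`.
/// ```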
6449 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6450 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6451 if !self.context.is_outbound() {
6452 panic!("Tried to create outbound funding_created message on an inbound channel!");
6454 if !matches!(
6455 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6456 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6458 panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6460 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6461 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6462 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6463 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6466 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6467 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6469 // Now that we're past error-generating stuff, update our local state:
6471 self.context.channel_state = ChannelState::FundingNegotiated;
6472 self.context.channel_id = funding_txo.to_channel_id();
6474 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6475 // We can skip this if it is a zero-conf channel.
6476 if funding_transaction.is_coin_base() &&
6477 self.context.minimum_depth.unwrap_or(0) > 0 &&
6478 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6479 self.context.minimum_depth = Some(COINBASE_MATURITY);
6482 self.context.funding_transaction = Some(funding_transaction);
6483 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6485 let funding_created = self.get_funding_created_msg(logger);
6486 if funding_created.is_none() {
6487 #[cfg(not(async_signing))] {
6488 panic!("Failed to get signature for new funding creation");
6490 #[cfg(async_signing)] {
6491 if !self.context.signer_pending_funding {
6492 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6493 self.context.signer_pending_funding = true;
6501 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6502 // The default channel type (ie the first one we try) depends on whether the channel is
6503 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6504 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6505 // with no other changes, and fall back to `only_static_remotekey`.
6506 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6507 if !config.channel_handshake_config.announced_channel &&
6508 config.channel_handshake_config.negotiate_scid_privacy &&
6509 their_features.supports_scid_privacy() {
6510 ret.set_scid_privacy_required();
6513 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6514 // set it now. If they don't understand it, we'll fall back to our default of
6515 // `only_static_remotekey`.
6516 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6517 their_features.supports_anchors_zero_fee_htlc_tx() {
6518 ret.set_anchors_zero_fee_htlc_tx_required();
6524 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6525 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6526 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
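///
/// # Example
///
/// A sketch (not compiled) of retrying with a downgraded channel type after a peer error,
/// assuming hypothetical `chan` and `fee_estimator` values:
///
/// ```ignore
/// match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
///     Ok(open_channel_msg) => { /* resend open_channel with one fewer feature */ },
///     Err(()) => { /* every fallback is exhausted; the channel must be failed */ },
/// }
/// ```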
6527 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6528 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6529 ) -> Result<msgs::OpenChannel, ()>
6531 F::Target: FeeEstimator
6533 if !self.context.is_outbound() ||
6534 !matches!(
6535 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6536 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6541 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6542 // We've exhausted our options
6545 // We support opening a few different types of channels. Try removing our additional
6546 // features one by one until we've either arrived at our default or the counterparty has accepted one.
6549 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6550 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6551 // checks whether the counterparty supports every feature, this would only happen if the
6552 // counterparty is advertising the feature, but rejecting channels proposing the feature for some reason.
6554 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6555 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6556 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6557 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6558 } else if self.context.channel_type.supports_scid_privacy() {
6559 self.context.channel_type.clear_scid_privacy();
6561 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6563 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6564 Ok(self.get_open_channel(chain_hash))
6567 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6568 if !self.context.is_outbound() {
6569 panic!("Tried to open a channel for an inbound channel?");
6571 if self.context.have_received_message() {
6572 panic!("Cannot generate an open_channel after we've moved forward");
6575 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6576 panic!("Tried to send an open_channel for a channel that has already advanced");
6579 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6580 let keys = self.context.get_holder_pubkeys();
6584 temporary_channel_id: self.context.channel_id,
6585 funding_satoshis: self.context.channel_value_satoshis,
6586 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6587 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6588 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6589 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6590 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6591 feerate_per_kw: self.context.feerate_per_kw as u32,
6592 to_self_delay: self.context.get_holder_selected_contest_delay(),
6593 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6594 funding_pubkey: keys.funding_pubkey,
6595 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6596 payment_point: keys.payment_point,
6597 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6598 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6599 first_per_commitment_point,
6600 channel_flags: if self.context.config.announced_channel {1} else {0},
6601 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6602 Some(script) => script.clone().into_inner(),
6603 None => Builder::new().into_script(),
6605 channel_type: Some(self.context.channel_type.clone()),
6610 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6611 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6613 // Check sanity of message fields:
6614 if !self.context.is_outbound() {
6615 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6617 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6618 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6620 if msg.dust_limit_satoshis > 21000000 * 100000000 { // 21 million BTC, in satoshis: no sane dust limit exceeds the total supply
6621 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6623 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6624 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6626 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6627 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6629 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6630 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6631 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6633 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6634 if msg.htlc_minimum_msat >= full_channel_value_msat {
6635 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6637 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6638 if msg.to_self_delay > max_delay_acceptable {
6639 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6641 if msg.max_accepted_htlcs < 1 {
6642 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6644 if msg.max_accepted_htlcs > MAX_HTLCS {
6645 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6648 // Now check against optional parameters as set by config...
6649 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6650 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6652 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6653 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6655 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6656 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6658 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6659 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6661 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6662 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6664 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6665 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6667 if msg.minimum_depth > peer_limits.max_minimum_depth {
6668 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Maximum allowed: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6671 if let Some(ty) = &msg.channel_type {
6672 if *ty != self.context.channel_type {
6673 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6675 } else if their_features.supports_channel_type() {
6676 // Assume they've accepted the channel type as they said they understand it.
6678 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6679 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6680 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6682 self.context.channel_type = channel_type.clone();
6683 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6686 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6687 match &msg.shutdown_scriptpubkey {
6688 &Some(ref script) => {
6689 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything.
6690 if script.len() == 0 {
6693 if !script::is_bolt2_compliant(&script, their_features) {
6694 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6696 Some(script.clone())
6699 // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
6701 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but provided no script. Use a 0-length script to opt out".to_owned()));
6706 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6707 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6708 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6709 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6710 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6712 if peer_limits.trust_own_funding_0conf {
6713 self.context.minimum_depth = Some(msg.minimum_depth);
6715 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6718 let counterparty_pubkeys = ChannelPublicKeys {
6719 funding_pubkey: msg.funding_pubkey,
6720 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6721 payment_point: msg.payment_point,
6722 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6723 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6726 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6727 selected_contest_delay: msg.to_self_delay,
6728 pubkeys: counterparty_pubkeys,
6731 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6732 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6734 self.context.channel_state = ChannelState::NegotiatingFunding(
6735 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6737 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6742 /// Handles a funding_signed message from the remote end.
6743 /// If this call is successful, broadcast the funding transaction (and not before!)
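///
/// # Example
///
/// An illustrative sketch (not compiled), assuming a hypothetical
/// `outbound_chan: OutboundV1Channel<SP>` and a received `msg: msgs::FundingSigned`:
///
/// ```ignore
/// let (chan, channel_monitor) = outbound_chan
///     .funding_signed(&msg, best_block, &signer_provider, &logger)
///     .map_err(|(_chan, e)| e)?;
/// // Persist `channel_monitor` with the chain::Watch first, then broadcast the funding tx.
/// ```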
6744 pub fn funding_signed<L: Deref>(
6745 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6746 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6750 if !self.context.is_outbound() {
6751 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6753 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6754 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6756 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6757 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6758 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6759 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6762 let funding_script = self.context.get_funding_redeemscript();
6764 let counterparty_keys = self.context.build_remote_transaction_keys();
6765 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6766 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6767 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6769 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6770 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6772 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6773 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6775 let trusted_tx = initial_commitment_tx.trust();
6776 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6777 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6778 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6779 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6780 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6784 let holder_commitment_tx = HolderCommitmentTransaction::new(
6785 initial_commitment_tx,
6788 &self.context.get_holder_pubkeys().funding_pubkey,
6789 self.context.counterparty_funding_pubkey()
6792 let validated =
6793 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6794 if validated.is_err() {
6795 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6798 let funding_redeemscript = self.context.get_funding_redeemscript();
6799 let funding_txo = self.context.get_funding_txo().unwrap();
6800 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6801 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6802 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6803 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6804 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6805 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6806 shutdown_script, self.context.get_holder_selected_contest_delay(),
6807 &self.context.destination_script, (funding_txo, funding_txo_script),
6808 &self.context.channel_transaction_parameters,
6809 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6811 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6812 channel_monitor.provide_initial_counterparty_commitment_tx(
6813 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6814 self.context.cur_counterparty_commitment_transaction_number,
6815 self.context.counterparty_cur_commitment_point.unwrap(),
6816 counterparty_initial_commitment_tx.feerate_per_kw(),
6817 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6818 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6820 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet, so there is no update to fail!
6821 if self.context.is_batch_funding() {
6822 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6824 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6826 self.context.cur_holder_commitment_transaction_number -= 1;
6827 self.context.cur_counterparty_commitment_transaction_number -= 1;
6829 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6831 let mut channel = Channel { context: self.context };
6833 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6834 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6835 Ok((channel, channel_monitor))
6838 /// Indicates that the signer may have some signatures for us, so we should retry if we're stuck waiting on them.
6840 #[cfg(async_signing)]
6841 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6842 if self.context.signer_pending_funding && self.context.is_outbound() {
6843 log_trace!(logger, "Signer unblocked a funding_created");
6844 self.get_funding_created_msg(logger)
6849 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6850 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6851 pub context: ChannelContext<SP>,
6852 pub unfunded_context: UnfundedChannelContext,
6855 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
6856 /// [`msgs::OpenChannel`].
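///
/// A sketch (not compiled) of screening an incoming `open_channel`, assuming hypothetical
/// `msg`, `their_features`, and `our_supported_features` values:
///
/// ```ignore
/// let channel_type = channel_type_from_open_channel(&msg, &their_features, &our_supported_features)?;
/// // On success the type requires `static_remote_key` and is a subset of
/// // `our_supported_features`, so it is safe to build a channel around it.
/// ```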
6857 pub(super) fn channel_type_from_open_channel(
6858 msg: &msgs::OpenChannel, their_features: &InitFeatures,
6859 our_supported_features: &ChannelTypeFeatures
6860 ) -> Result<ChannelTypeFeatures, ChannelError> {
6861 if let Some(channel_type) = &msg.channel_type {
6862 if channel_type.supports_any_optional_bits() {
6863 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6866 // We only support the channel types defined by the `ChannelManager` in
6867 // `provided_channel_type_features`. The channel type must always support
6868 // `static_remote_key`.
6869 if !channel_type.requires_static_remote_key() {
6870 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6872 // Make sure we support all of the features behind the channel type.
6873 if !channel_type.is_subset(our_supported_features) {
6874 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6876 let announced_channel = (msg.channel_flags & 1) == 1;
6877 if channel_type.requires_scid_privacy() && announced_channel {
6878 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6880 Ok(channel_type.clone())
6882 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6883 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6884 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6890 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6891 /// Creates a new channel from a remote side's request for one.
6892 /// Assumes chain_hash has already been checked and corresponds with what we expect!
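///
/// A sketch (not compiled) of constructing the inbound side from a received `open_channel`,
/// assuming hypothetical surrounding values from the message handler:
///
/// ```ignore
/// let inbound = InboundV1Channel::new(
///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
///     &our_supported_features, &their_features, &msg, user_id, &config,
///     best_block_height, &logger, /* is_0conf */ false,
/// )?;
/// ```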
6893 pub fn new<ES: Deref, F: Deref, L: Deref>(
6894 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6895 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6896 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6897 current_chain_height: u32, logger: &L, is_0conf: bool,
6898 ) -> Result<InboundV1Channel<SP>, ChannelError>
6899 where ES::Target: EntropySource,
6900 F::Target: FeeEstimator,
6903 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6904 let announced_channel = (msg.channel_flags & 1) == 1;
6906 // First check the channel type is known, failing before we do anything else if we don't
6907 // support this channel type.
6908 let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
6910 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6911 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6912 let pubkeys = holder_signer.pubkeys().clone();
6913 let counterparty_pubkeys = ChannelPublicKeys {
6914 funding_pubkey: msg.funding_pubkey,
6915 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6916 payment_point: msg.payment_point,
6917 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6918 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6921 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6922 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6925 // Check sanity of message fields:
6926 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6927 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6929 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6930 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6932 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6933 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6935 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6936 if msg.push_msat > full_channel_value_msat {
6937 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6939 if msg.dust_limit_satoshis > msg.funding_satoshis {
6940 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6942 if msg.htlc_minimum_msat >= full_channel_value_msat {
6943 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6945 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6947 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6948 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6949 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6951 if msg.max_accepted_htlcs < 1 {
6952 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6954 if msg.max_accepted_htlcs > MAX_HTLCS {
6955 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6958 // Now check against optional parameters as set by config...
6959 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6960 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6962 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6963 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6965 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6966 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6968 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6969 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6971 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6972 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6974 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6975 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6977 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6978 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6981 // Convert things into internal flags and prep our state:
6983 if config.channel_handshake_limits.force_announced_channel_preference {
6984 if config.channel_handshake_config.announced_channel != announced_channel {
6985 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6989 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6990 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6991 // Protocol-level safety check; this should never trip because of
6992 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
6993 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6995 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6996 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6998 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6999 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
7000 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7002 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
7003 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
7006 // check if the funder's amount for the initial commitment tx is sufficient
7007 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
7008 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7009 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7013 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
7014 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7015 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7016 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the fee for the initial commitment transaction ({} sats).", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7019 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7020 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7021 // want to push much to us), our counterparty should always have more than our reserve.
7022 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7023 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
7026 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7027 match &msg.shutdown_scriptpubkey {
7028 &Some(ref script) => {
7029 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything.
7030 if script.len() == 0 {
7033 if !script::is_bolt2_compliant(&script, their_features) {
7034 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7036 Some(script.clone())
7039 // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
7041 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but provided no script. Use a 0-length script to opt out".to_owned()));
7046 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7047 match signer_provider.get_shutdown_scriptpubkey() {
7048 Ok(scriptpubkey) => Some(scriptpubkey),
7049 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7053 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7054 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7055 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7059 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7060 Ok(script) => script,
7061 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7064 let mut secp_ctx = Secp256k1::new();
7065 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7067 let minimum_depth = if is_0conf {
7068 Some(0)
7069 } else {
7070 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7074 context: ChannelContext {
7077 config: LegacyChannelConfig {
7078 options: config.channel_config.clone(),
7080 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7085 inbound_handshake_limits_override: None,
7087 temporary_channel_id: Some(msg.temporary_channel_id),
7088 channel_id: msg.temporary_channel_id,
7089 channel_state: ChannelState::NegotiatingFunding(
7090 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7092 announcement_sigs_state: AnnouncementSigsState::NotSent,
7095 latest_monitor_update_id: 0,
7097 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7098 shutdown_scriptpubkey,
7101 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7102 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7103 value_to_self_msat: msg.push_msat,
7105 pending_inbound_htlcs: Vec::new(),
7106 pending_outbound_htlcs: Vec::new(),
7107 holding_cell_htlc_updates: Vec::new(),
7108 pending_update_fee: None,
7109 holding_cell_update_fee: None,
7110 next_holder_htlc_id: 0,
7111 next_counterparty_htlc_id: 0,
7112 update_time_counter: 1,
7114 resend_order: RAACommitmentOrder::CommitmentFirst,
7116 monitor_pending_channel_ready: false,
7117 monitor_pending_revoke_and_ack: false,
7118 monitor_pending_commitment_signed: false,
7119 monitor_pending_forwards: Vec::new(),
7120 monitor_pending_failures: Vec::new(),
7121 monitor_pending_finalized_fulfills: Vec::new(),
7123 signer_pending_commitment_update: false,
7124 signer_pending_funding: false,
7126 #[cfg(debug_assertions)]
7127 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7128 #[cfg(debug_assertions)]
7129 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7131 last_sent_closing_fee: None,
7132 pending_counterparty_closing_signed: None,
7133 expecting_peer_commitment_signed: false,
7134 closing_fee_limits: None,
7135 target_closing_feerate_sats_per_kw: None,
7137 funding_tx_confirmed_in: None,
7138 funding_tx_confirmation_height: 0,
7139 short_channel_id: None,
7140 channel_creation_height: current_chain_height,
7142 feerate_per_kw: msg.feerate_per_kw,
7143 channel_value_satoshis: msg.funding_satoshis,
7144 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7145 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7146 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7147 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7148 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7149 holder_selected_channel_reserve_satoshis,
7150 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7151 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7152 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7153 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7156 counterparty_forwarding_info: None,
7158 channel_transaction_parameters: ChannelTransactionParameters {
7159 holder_pubkeys: pubkeys,
7160 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7161 is_outbound_from_holder: false,
7162 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7163 selected_contest_delay: msg.to_self_delay,
7164 pubkeys: counterparty_pubkeys,
7166 funding_outpoint: None,
7167 channel_type_features: channel_type.clone()
7169 funding_transaction: None,
7170 is_batch_funding: None,
7172 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7173 counterparty_prev_commitment_point: None,
7174 counterparty_node_id,
7176 counterparty_shutdown_scriptpubkey,
7178 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7180 channel_update_status: ChannelUpdateStatus::Enabled,
7181 closing_signed_in_flight: false,
7183 announcement_sigs: None,
7185 #[cfg(any(test, fuzzing))]
7186 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7187 #[cfg(any(test, fuzzing))]
7188 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7190 workaround_lnd_bug_4006: None,
7191 sent_message_awaiting_response: None,
7193 latest_inbound_scid_alias: None,
7194 outbound_scid_alias: 0,
7196 channel_pending_event_emitted: false,
7197 channel_ready_event_emitted: false,
7199 #[cfg(any(test, fuzzing))]
7200 historical_inbound_htlc_fulfills: HashSet::new(),
7205 blocked_monitor_updates: Vec::new(),
7207 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7213 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7214 /// should be sent back to the counterparty node.
7216 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7217 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7218 if self.context.is_outbound() {
7219 panic!("Tried to send accept_channel for an outbound channel?");
7222 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7223 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7225 panic!("Tried to send accept_channel after channel had moved forward");
7227 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7228 panic!("Tried to send an accept_channel for a channel that has already advanced");
7231 self.generate_accept_channel_message()
7234 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7235 /// inbound channel. If the intention is to accept an inbound channel, use
7236 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7238 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7239 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7240 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7241 let keys = self.context.get_holder_pubkeys();
7243 msgs::AcceptChannel {
7244 temporary_channel_id: self.context.channel_id,
7245 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7246 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7247 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7248 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7249 minimum_depth: self.context.minimum_depth.unwrap(),
7250 to_self_delay: self.context.get_holder_selected_contest_delay(),
7251 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7252 funding_pubkey: keys.funding_pubkey,
7253 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7254 payment_point: keys.payment_point,
7255 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7256 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7257 first_per_commitment_point,
7258 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7259 Some(script) => script.clone().into_inner(),
7260 None => Builder::new().into_script(),
7262 channel_type: Some(self.context.channel_type.clone()),
7264 next_local_nonce: None,
7268 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7269 /// inbound channel without accepting it.
7271 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7273 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7274 self.generate_accept_channel_message()
7277 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7278 let funding_script = self.context.get_funding_redeemscript();
7280 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7281 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7282 let trusted_tx = initial_commitment_tx.trust();
7283 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7284 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7285 // They sign the holder commitment transaction...
7286 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7287 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7288 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7289 encode::serialize_hex(&funding_script), &self.context.channel_id());
7290 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7292 Ok(initial_commitment_tx)
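// Illustrative core of the check above, using only the secp256k1 types this
// file already imports: the counterparty's ECDSA signature is verified over
// the initial holder commitment transaction's sighash against their funding
// pubkey.
fn verify_counterparty_commitment_sig(
	secp_ctx: &Secp256k1<secp256k1::All>, sighash: &secp256k1::Message,
	sig: &Signature, counterparty_funding_pubkey: &PublicKey,
) -> Result<(), secp256k1::Error> {
	secp_ctx.verify_ecdsa(sighash, sig, counterparty_funding_pubkey)
}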
7295 pub fn funding_created<L: Deref>(
7296 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7297 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7301 if self.context.is_outbound() {
7302 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7305 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7306 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7308 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7309 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7311 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7313 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7314 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7315 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7316 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7319 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7320 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7321 // This is an externally observable change before we finish all our checks. In particular
7322 // check_funding_created_signature may fail.
7323 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7325 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7327 Err(ChannelError::Close(e)) => {
7328 self.context.channel_transaction_parameters.funding_outpoint = None;
7329 return Err((self, ChannelError::Close(e)));
7332 // The only error we know how to handle is ChannelError::Close, so we fall over here
7333 // to make sure we don't continue with an inconsistent state.
7334 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7338 let holder_commitment_tx = HolderCommitmentTransaction::new(
7339 initial_commitment_tx,
7342 &self.context.get_holder_pubkeys().funding_pubkey,
7343 self.context.counterparty_funding_pubkey()
7346 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7347 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7350 // Now that we're past error-generating stuff, update our local state:
7352 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7353 self.context.channel_id = funding_txo.to_channel_id();
7354 self.context.cur_counterparty_commitment_transaction_number -= 1;
7355 self.context.cur_holder_commitment_transaction_number -= 1;
7357 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7359 let funding_redeemscript = self.context.get_funding_redeemscript();
7360 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7361 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7362 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7363 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7364 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7365 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7366 shutdown_script, self.context.get_holder_selected_contest_delay(),
7367 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7368 &self.context.channel_transaction_parameters,
7369 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7371 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7372 channel_monitor.provide_initial_counterparty_commitment_tx(
7373 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7374 self.context.cur_counterparty_commitment_transaction_number + 1,
7375 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7376 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7377 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7379 log_info!(logger, "{} funding_signed for peer for channel {}",
7380 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7382 // Promote the channel to a full-fledged one now that we have updated the state and have a
7383 // `ChannelMonitor`.
7384 let mut channel = Channel {
7385 context: self.context,
7387 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7388 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7390 Ok((channel, funding_signed, channel_monitor))
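// Hedged illustration of the commitment-number obscuring referenced above (per
// BOLT 3): the lower 48 bits of SHA256(funder_payment_basepoint ||
// fundee_payment_basepoint) are XORed with the commitment number before it is
// embedded in the commitment transaction's locktime and sequence fields.
fn obscure_factor_sketch(funder_payment_basepoint: &PublicKey, fundee_payment_basepoint: &PublicKey) -> u64 {
	use bitcoin::hashes::HashEngine;
	let mut engine = Sha256::engine();
	engine.input(&funder_payment_basepoint.serialize());
	engine.input(&fundee_payment_basepoint.serialize());
	let res = Sha256::from_engine(engine).to_byte_array();
	// Lower 48 bits: the last six bytes of the digest, big-endian.
	((res[26] as u64) << 40) | ((res[27] as u64) << 32) | ((res[28] as u64) << 24)
		| ((res[29] as u64) << 16) | ((res[30] as u64) << 8) | (res[31] as u64)
}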
7394 const SERIALIZATION_VERSION: u8 = 3;
7395 const MIN_SERIALIZATION_VERSION: u8 = 3;
7397 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7403 impl Writeable for ChannelUpdateStatus {
7404 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7405 // We only care about writing out the current state as it was last announced, i.e. only either
7406 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7407 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7409 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7410 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7411 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7412 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7418 impl Readable for ChannelUpdateStatus {
7419 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7420 Ok(match <u8 as Readable>::read(reader)? {
7421 0 => ChannelUpdateStatus::Enabled,
7422 1 => ChannelUpdateStatus::Disabled,
7423 _ => return Err(DecodeError::InvalidValue),
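// Self-contained sketch of the write-as-last-announced pattern above (names
// are illustrative, not LDK API): staged variants collapse on the wire to the
// state most recently announced, so a round-trip intentionally drops staging.
#[derive(Debug, PartialEq)]
enum StatusSketch { Enabled, EnabledStaged, Disabled, DisabledStaged }
fn status_to_wire(s: &StatusSketch) -> u8 {
	match s {
		StatusSketch::Enabled | StatusSketch::DisabledStaged => 0, // last announced: enabled
		StatusSketch::Disabled | StatusSketch::EnabledStaged => 1, // last announced: disabled
	}
}
fn status_from_wire(b: u8) -> Option<StatusSketch> {
	match b { 0 => Some(StatusSketch::Enabled), 1 => Some(StatusSketch::Disabled), _ => None }
}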
7428 impl Writeable for AnnouncementSigsState {
7429 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7430 // We only care about writing out the current state as if we had just disconnected, at
7431 // which point we always set anything but PeerReceived to NotSent.
7433 AnnouncementSigsState::NotSent => 0u8.write(writer),
7434 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7435 AnnouncementSigsState::Committed => 0u8.write(writer),
7436 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7441 impl Readable for AnnouncementSigsState {
7442 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7443 Ok(match <u8 as Readable>::read(reader)? {
7444 0 => AnnouncementSigsState::NotSent,
7445 1 => AnnouncementSigsState::PeerReceived,
7446 _ => return Err(DecodeError::InvalidValue),
7451 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7452 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7453 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7456 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7458 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7459 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7460 // the low bytes now and the optional high bytes later.
7461 let user_id_low = self.context.user_id as u64;
7462 user_id_low.write(writer)?;
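// Worked sketch of the u128 <-> two-u64 compatibility split described above:
// pre-0.0.113 readers parse only the low half, newer readers reassemble both.
fn split_user_id(user_id: u128) -> (u64, u64) {
	(user_id as u64, (user_id >> 64) as u64) // (low, high)
}
fn join_user_id(low: u64, high: u64) -> u128 {
	(low as u128) | ((high as u128) << 64)
}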
7464 // Version 1 deserializers expected to read parts of the config object here. Version 2
7465 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7466 // `minimum_depth` we simply write dummy values here.
7467 writer.write_all(&[0; 8])?;
7469 self.context.channel_id.write(writer)?;
7471 let mut channel_state = self.context.channel_state;
7472 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7473 channel_state.set_peer_disconnected();
7475 channel_state.to_u32().write(writer)?;
7477 self.context.channel_value_satoshis.write(writer)?;
7479 self.context.latest_monitor_update_id.write(writer)?;
7481 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7482 // deserialized from that format.
7483 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7484 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7485 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7487 self.context.destination_script.write(writer)?;
7489 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7490 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7491 self.context.value_to_self_msat.write(writer)?;
7493 let mut dropped_inbound_htlcs = 0;
7494 for htlc in self.context.pending_inbound_htlcs.iter() {
7495 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7496 dropped_inbound_htlcs += 1;
7499 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7500 for htlc in self.context.pending_inbound_htlcs.iter() {
7501 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7504 htlc.htlc_id.write(writer)?;
7505 htlc.amount_msat.write(writer)?;
7506 htlc.cltv_expiry.write(writer)?;
7507 htlc.payment_hash.write(writer)?;
7509 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7510 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7512 htlc_state.write(writer)?;
7514 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7516 htlc_state.write(writer)?;
7518 &InboundHTLCState::Committed => {
7521 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7523 removal_reason.write(writer)?;
7528 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7529 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7530 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7532 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7533 for htlc in self.context.pending_outbound_htlcs.iter() {
7534 htlc.htlc_id.write(writer)?;
7535 htlc.amount_msat.write(writer)?;
7536 htlc.cltv_expiry.write(writer)?;
7537 htlc.payment_hash.write(writer)?;
7538 htlc.source.write(writer)?;
7540 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7542 onion_packet.write(writer)?;
7544 &OutboundHTLCState::Committed => {
7547 &OutboundHTLCState::RemoteRemoved(_) => {
7548 // Treat this as a Committed because we haven't received the CS - they'll
7549 // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
7552 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7554 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7555 preimages.push(preimage);
7557 let reason: Option<&HTLCFailReason> = outcome.into();
7558 reason.write(writer)?;
7560 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7562 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7563 preimages.push(preimage);
7565 let reason: Option<&HTLCFailReason> = outcome.into();
7566 reason.write(writer)?;
7569 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7570 pending_outbound_blinding_points.push(htlc.blinding_point);
7573 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7574 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7575 // Vec of (htlc_id, failure_code, sha256_of_onion)
7576 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7577 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7578 for update in self.context.holding_cell_htlc_updates.iter() {
7580 &HTLCUpdateAwaitingACK::AddHTLC {
7581 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7582 blinding_point, skimmed_fee_msat,
7585 amount_msat.write(writer)?;
7586 cltv_expiry.write(writer)?;
7587 payment_hash.write(writer)?;
7588 source.write(writer)?;
7589 onion_routing_packet.write(writer)?;
7591 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7592 holding_cell_blinding_points.push(blinding_point);
7594 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7596 payment_preimage.write(writer)?;
7597 htlc_id.write(writer)?;
7599 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7601 htlc_id.write(writer)?;
7602 err_packet.write(writer)?;
7604 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7605 htlc_id, failure_code, sha256_of_onion
7607 // We don't want to break downgrading by adding a new variant, so write a dummy
7608 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7609 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7611 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7613 htlc_id.write(writer)?;
7614 dummy_err_packet.write(writer)?;
7619 match self.context.resend_order {
7620 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7621 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7624 self.context.monitor_pending_channel_ready.write(writer)?;
7625 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7626 self.context.monitor_pending_commitment_signed.write(writer)?;
7628 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7629 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7630 pending_forward.write(writer)?;
7631 htlc_id.write(writer)?;
7634 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7635 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7636 htlc_source.write(writer)?;
7637 payment_hash.write(writer)?;
7638 fail_reason.write(writer)?;
7641 if self.context.is_outbound() {
7642 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7643 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7644 Some(feerate).write(writer)?;
7646 // As with inbound HTLCs, if the update was only announced and never committed in a
7647 // commitment_signed, drop it.
7648 None::<u32>.write(writer)?;
7650 self.context.holding_cell_update_fee.write(writer)?;
7652 self.context.next_holder_htlc_id.write(writer)?;
7653 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7654 self.context.update_time_counter.write(writer)?;
7655 self.context.feerate_per_kw.write(writer)?;
7657 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7658 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7659 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7660 // consider the stale state on reload.
7663 self.context.funding_tx_confirmed_in.write(writer)?;
7664 self.context.funding_tx_confirmation_height.write(writer)?;
7665 self.context.short_channel_id.write(writer)?;
7667 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7668 self.context.holder_dust_limit_satoshis.write(writer)?;
7669 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7671 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7672 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7674 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7675 self.context.holder_htlc_minimum_msat.write(writer)?;
7676 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7678 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7679 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7681 match &self.context.counterparty_forwarding_info {
7684 info.fee_base_msat.write(writer)?;
7685 info.fee_proportional_millionths.write(writer)?;
7686 info.cltv_expiry_delta.write(writer)?;
7688 None => 0u8.write(writer)?
7691 self.context.channel_transaction_parameters.write(writer)?;
7692 self.context.funding_transaction.write(writer)?;
7694 self.context.counterparty_cur_commitment_point.write(writer)?;
7695 self.context.counterparty_prev_commitment_point.write(writer)?;
7696 self.context.counterparty_node_id.write(writer)?;
7698 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7700 self.context.commitment_secrets.write(writer)?;
7702 self.context.channel_update_status.write(writer)?;
7704 #[cfg(any(test, fuzzing))]
7705 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7706 #[cfg(any(test, fuzzing))]
7707 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7708 htlc.write(writer)?;
7711 // If the channel type is something other than only-static-remote-key, then we need to have
7712 // older clients fail to deserialize this channel at all. If the type is
7713 // only-static-remote-key, we simply consider it "default" and don't write the channel type at all.
7715 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7716 Some(&self.context.channel_type) } else { None };
7718 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7719 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to
7720 // a different percentage of the channel value than 10%, which older versions of LDK used
7721 // to set it to before the percentage was made configurable.
7722 let serialized_holder_selected_reserve =
7723 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7724 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7726 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7727 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7728 let serialized_holder_htlc_max_in_flight =
7729 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7730 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7732 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7733 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7735 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7736 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7737 // we write the high bytes as an option here.
7738 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7740 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7742 write_tlv_fields!(writer, {
7743 (0, self.context.announcement_sigs, option),
7744 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7745 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7746 // them twice, once with their original default values above, and once as an option
7747 // here. On the read side, old versions will simply ignore the odd-type entries here,
7748 // and new versions map the default values to None and allow the TLV entries here to override them.
7750 (1, self.context.minimum_depth, option),
7751 (2, chan_type, option),
7752 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7753 (4, serialized_holder_selected_reserve, option),
7754 (5, self.context.config, required),
7755 (6, serialized_holder_htlc_max_in_flight, option),
7756 (7, self.context.shutdown_scriptpubkey, option),
7757 (8, self.context.blocked_monitor_updates, optional_vec),
7758 (9, self.context.target_closing_feerate_sats_per_kw, option),
7759 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7760 (13, self.context.channel_creation_height, required),
7761 (15, preimages, required_vec),
7762 (17, self.context.announcement_sigs_state, required),
7763 (19, self.context.latest_inbound_scid_alias, option),
7764 (21, self.context.outbound_scid_alias, required),
7765 (23, channel_ready_event_emitted, option),
7766 (25, user_id_high_opt, option),
7767 (27, self.context.channel_keys_id, required),
7768 (28, holder_max_accepted_htlcs, option),
7769 (29, self.context.temporary_channel_id, option),
7770 (31, channel_pending_event_emitted, option),
7771 (35, pending_outbound_skimmed_fees, optional_vec),
7772 (37, holding_cell_skimmed_fees, optional_vec),
7773 (38, self.context.is_batch_funding, option),
7774 (39, pending_outbound_blinding_points, optional_vec),
7775 (41, holding_cell_blinding_points, optional_vec),
7776 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
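// Hedged note on the TLV type numbers above: per the lightning "it's OK to be
// odd" rule, unknown odd-typed entries may be silently skipped by a reader,
// while unknown even-typed entries (e.g. 2, 4, 28, 38) make deserialization
// fail, deliberately preventing older versions from loading channels whose
// features they cannot honor.
fn unknown_tlv_is_fatal(tlv_type: u64) -> bool { tlv_type % 2 == 0 }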
7783 const MAX_ALLOC_SIZE: usize = 64*1024;
7784 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7786 ES::Target: EntropySource,
7787 SP::Target: SignerProvider
7789 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7790 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7791 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7793 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7794 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7795 // the low bytes now and the high bytes later.
7796 let user_id_low: u64 = Readable::read(reader)?;
7798 let mut config = Some(LegacyChannelConfig::default());
7800 // Read the old serialization of the ChannelConfig from version 0.0.98.
7801 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7802 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7803 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7804 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7806 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7807 let mut _val: u64 = Readable::read(reader)?;
7810 let channel_id = Readable::read(reader)?;
7811 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7812 let channel_value_satoshis = Readable::read(reader)?;
7814 let latest_monitor_update_id = Readable::read(reader)?;
7816 let mut keys_data = None;
7818 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7819 // the `channel_keys_id` TLV is present below.
7820 let keys_len: u32 = Readable::read(reader)?;
7821 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7822 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7823 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7824 let mut data = [0; 1024];
7825 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7826 reader.read_exact(read_slice)?;
7827 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
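// Generalized sketch (hypothetical helper) of the bounded-read pattern above:
// never allocate the full declared length up front; cap the initial capacity
// and read fixed-size chunks until the declared length has been consumed.
fn read_len_prefixed_capped<R: io::Read>(
	reader: &mut R, declared_len: usize, alloc_cap: usize,
) -> Result<Vec<u8>, io::Error> {
	let mut out = Vec::with_capacity(cmp::min(declared_len, alloc_cap));
	let mut buf = [0u8; 1024];
	while out.len() < declared_len {
		let chunk = cmp::min(declared_len - out.len(), buf.len());
		reader.read_exact(&mut buf[..chunk])?;
		out.extend_from_slice(&buf[..chunk]);
	}
	Ok(out)
}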
7831 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7832 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7833 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7836 let destination_script = Readable::read(reader)?;
7838 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7839 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7840 let value_to_self_msat = Readable::read(reader)?;
7842 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7844 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7845 for _ in 0..pending_inbound_htlc_count {
7846 pending_inbound_htlcs.push(InboundHTLCOutput {
7847 htlc_id: Readable::read(reader)?,
7848 amount_msat: Readable::read(reader)?,
7849 cltv_expiry: Readable::read(reader)?,
7850 payment_hash: Readable::read(reader)?,
7851 state: match <u8 as Readable>::read(reader)? {
7852 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7853 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7854 3 => InboundHTLCState::Committed,
7855 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7856 _ => return Err(DecodeError::InvalidValue),
7861 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7862 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7863 for _ in 0..pending_outbound_htlc_count {
7864 pending_outbound_htlcs.push(OutboundHTLCOutput {
7865 htlc_id: Readable::read(reader)?,
7866 amount_msat: Readable::read(reader)?,
7867 cltv_expiry: Readable::read(reader)?,
7868 payment_hash: Readable::read(reader)?,
7869 source: Readable::read(reader)?,
7870 state: match <u8 as Readable>::read(reader)? {
7871 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7872 1 => OutboundHTLCState::Committed,
7874 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7875 OutboundHTLCState::RemoteRemoved(option.into())
7878 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7879 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7882 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7883 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7885 _ => return Err(DecodeError::InvalidValue),
7887 skimmed_fee_msat: None,
7888 blinding_point: None,
7892 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7893 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7894 for _ in 0..holding_cell_htlc_update_count {
7895 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7896 0 => HTLCUpdateAwaitingACK::AddHTLC {
7897 amount_msat: Readable::read(reader)?,
7898 cltv_expiry: Readable::read(reader)?,
7899 payment_hash: Readable::read(reader)?,
7900 source: Readable::read(reader)?,
7901 onion_routing_packet: Readable::read(reader)?,
7902 skimmed_fee_msat: None,
7903 blinding_point: None,
7905 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7906 payment_preimage: Readable::read(reader)?,
7907 htlc_id: Readable::read(reader)?,
7909 2 => HTLCUpdateAwaitingACK::FailHTLC {
7910 htlc_id: Readable::read(reader)?,
7911 err_packet: Readable::read(reader)?,
7913 _ => return Err(DecodeError::InvalidValue),
7917 let resend_order = match <u8 as Readable>::read(reader)? {
7918 0 => RAACommitmentOrder::CommitmentFirst,
7919 1 => RAACommitmentOrder::RevokeAndACKFirst,
7920 _ => return Err(DecodeError::InvalidValue),
7923 let monitor_pending_channel_ready = Readable::read(reader)?;
7924 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7925 let monitor_pending_commitment_signed = Readable::read(reader)?;
7927 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7928 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7929 for _ in 0..monitor_pending_forwards_count {
7930 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7933 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7934 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7935 for _ in 0..monitor_pending_failures_count {
7936 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7939 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7941 let holding_cell_update_fee = Readable::read(reader)?;
7943 let next_holder_htlc_id = Readable::read(reader)?;
7944 let next_counterparty_htlc_id = Readable::read(reader)?;
7945 let update_time_counter = Readable::read(reader)?;
7946 let feerate_per_kw = Readable::read(reader)?;
7948 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7949 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7950 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7951 // consider the stale state on reload.
7952 match <u8 as Readable>::read(reader)? {
7955 let _: u32 = Readable::read(reader)?;
7956 let _: u64 = Readable::read(reader)?;
7957 let _: Signature = Readable::read(reader)?;
7959 _ => return Err(DecodeError::InvalidValue),
7962 let funding_tx_confirmed_in = Readable::read(reader)?;
7963 let funding_tx_confirmation_height = Readable::read(reader)?;
7964 let short_channel_id = Readable::read(reader)?;
7966 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7967 let holder_dust_limit_satoshis = Readable::read(reader)?;
7968 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7969 let mut counterparty_selected_channel_reserve_satoshis = None;
7971 // Read the old serialization from version 0.0.98.
7972 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7974 // Read the 8 bytes of backwards-compatibility data.
7975 let _dummy: u64 = Readable::read(reader)?;
7977 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7978 let holder_htlc_minimum_msat = Readable::read(reader)?;
7979 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7981 let mut minimum_depth = None;
7983 // Read the old serialization from version 0.0.98.
7984 minimum_depth = Some(Readable::read(reader)?);
7986 // Read the 4 bytes of backwards-compatibility data.
7987 let _dummy: u32 = Readable::read(reader)?;
7990 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7992 1 => Some(CounterpartyForwardingInfo {
7993 fee_base_msat: Readable::read(reader)?,
7994 fee_proportional_millionths: Readable::read(reader)?,
7995 cltv_expiry_delta: Readable::read(reader)?,
7997 _ => return Err(DecodeError::InvalidValue),
8000 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8001 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8003 let counterparty_cur_commitment_point = Readable::read(reader)?;
8005 let counterparty_prev_commitment_point = Readable::read(reader)?;
8006 let counterparty_node_id = Readable::read(reader)?;
8008 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8009 let commitment_secrets = Readable::read(reader)?;
8011 let channel_update_status = Readable::read(reader)?;
8013 #[cfg(any(test, fuzzing))]
8014 let mut historical_inbound_htlc_fulfills = HashSet::new();
8015 #[cfg(any(test, fuzzing))]
8017 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8018 for _ in 0..htlc_fulfills_len {
8019 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8023 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8024 Some((feerate, if channel_parameters.is_outbound_from_holder {
8025 FeeUpdateState::Outbound
8027 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8033 let mut announcement_sigs = None;
8034 let mut target_closing_feerate_sats_per_kw = None;
8035 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8036 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8037 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8038 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8039 // only, so we default to that if none was written.
8040 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8041 let mut channel_creation_height = Some(serialized_height);
8042 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8044 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8045 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8046 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8047 let mut latest_inbound_scid_alias = None;
8048 let mut outbound_scid_alias = None;
8049 let mut channel_pending_event_emitted = None;
8050 let mut channel_ready_event_emitted = None;
8052 let mut user_id_high_opt: Option<u64> = None;
8053 let mut channel_keys_id: Option<[u8; 32]> = None;
8054 let mut temporary_channel_id: Option<ChannelId> = None;
8055 let mut holder_max_accepted_htlcs: Option<u16> = None;
8057 let mut blocked_monitor_updates = Some(Vec::new());
8059 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8060 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8062 let mut is_batch_funding: Option<()> = None;
8064 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8065 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8067 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8069 read_tlv_fields!(reader, {
8070 (0, announcement_sigs, option),
8071 (1, minimum_depth, option),
8072 (2, channel_type, option),
8073 (3, counterparty_selected_channel_reserve_satoshis, option),
8074 (4, holder_selected_channel_reserve_satoshis, option),
8075 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8076 (6, holder_max_htlc_value_in_flight_msat, option),
8077 (7, shutdown_scriptpubkey, option),
8078 (8, blocked_monitor_updates, optional_vec),
8079 (9, target_closing_feerate_sats_per_kw, option),
8080 (11, monitor_pending_finalized_fulfills, optional_vec),
8081 (13, channel_creation_height, option),
8082 (15, preimages_opt, optional_vec),
8083 (17, announcement_sigs_state, option),
8084 (19, latest_inbound_scid_alias, option),
8085 (21, outbound_scid_alias, option),
8086 (23, channel_ready_event_emitted, option),
8087 (25, user_id_high_opt, option),
8088 (27, channel_keys_id, option),
8089 (28, holder_max_accepted_htlcs, option),
8090 (29, temporary_channel_id, option),
8091 (31, channel_pending_event_emitted, option),
8092 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8093 (37, holding_cell_skimmed_fees_opt, optional_vec),
8094 (38, is_batch_funding, option),
8095 (39, pending_outbound_blinding_points_opt, optional_vec),
8096 (41, holding_cell_blinding_points_opt, optional_vec),
8097 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8100 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8101 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8102 // If we've gotten to the funding stage of the channel, populate the signer with its
8103 // required channel parameters.
8104 if channel_state >= ChannelState::FundingNegotiated {
8105 holder_signer.provide_channel_parameters(&channel_parameters);
8107 (channel_keys_id, holder_signer)
8109 // `keys_data` can be `None` if we had corrupted data.
8110 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8111 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8112 (holder_signer.channel_keys_id(), holder_signer)
8115 if let Some(preimages) = preimages_opt {
8116 let mut iter = preimages.into_iter();
8117 for htlc in pending_outbound_htlcs.iter_mut() {
8119 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8120 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8122 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8123 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8128 // We expect all preimages to be consumed above
8129 if iter.next().is_some() {
8130 return Err(DecodeError::InvalidValue);
8134 let chan_features = channel_type.as_ref().unwrap();
8135 if !chan_features.is_subset(our_supported_features) {
8136 // If the channel was written by a new version and negotiated with features we don't
8137 // understand yet, refuse to read it.
8138 return Err(DecodeError::UnknownRequiredFeature);
8141 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8142 // To account for that, we're proactively setting/overriding the field here.
8143 channel_parameters.channel_type_features = chan_features.clone();
8145 let mut secp_ctx = Secp256k1::new();
8146 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8148 // `user_id` used to be a single u64 value. In order to remain backwards
8149 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8150 // separate u64 values.
8151 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8153 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8155 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8156 let mut iter = skimmed_fees.into_iter();
8157 for htlc in pending_outbound_htlcs.iter_mut() {
8158 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8160 // We expect all skimmed fees to be consumed above
8161 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8163 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8164 let mut iter = skimmed_fees.into_iter();
8165 for htlc in holding_cell_htlc_updates.iter_mut() {
8166 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8167 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8170 // We expect all skimmed fees to be consumed above
8171 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8173 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8174 let mut iter = blinding_pts.into_iter();
8175 for htlc in pending_outbound_htlcs.iter_mut() {
8176 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8178 // We expect all blinding points to be consumed above
8179 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8181 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8182 let mut iter = blinding_pts.into_iter();
8183 for htlc in holding_cell_htlc_updates.iter_mut() {
8184 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8185 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8188 // We expect all blinding points to be consumed above
8189 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
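// Hedged generalization of the consume-exactly checks used above for the
// skimmed-fee and blinding-point vectors: side-channel data serialized in
// iteration order must map one-to-one onto the entries it annotates, and any
// shortfall or surplus is treated as corruption.
fn apply_side_channel<T>(slots: &mut [Option<T>], values: Vec<Option<T>>) -> Result<(), DecodeError> {
	let mut iter = values.into_iter();
	for slot in slots.iter_mut() {
		*slot = iter.next().ok_or(DecodeError::InvalidValue)?; // too few values
	}
	if iter.next().is_some() { return Err(DecodeError::InvalidValue); } // too many
	Ok(())
}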
8192 if let Some(malformed_htlcs) = malformed_htlcs {
8193 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8194 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8195 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8196 let matches = *htlc_id == malformed_htlc_id;
8197 if matches { debug_assert!(err_packet.data.is_empty()) }
8200 }).ok_or(DecodeError::InvalidValue)?;
8201 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8202 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8204 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
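// Miniature model (illustrative types only) of the downgrade-compatibility
// trick resolved above: the legacy stream carries a well-formed-but-empty
// `FailHTLC`, the real malformed data rides in an odd TLV that old readers
// ignore, and on read the TLV upgrades the dummy back in place.
enum UpdateSketch { Fail { id: u64, data: Vec<u8> }, FailMalformed { id: u64, code: u16 } }
fn upgrade_dummies(updates: &mut [UpdateSketch], malformed: &[(u64, u16)]) -> Result<(), ()> {
	for &(mid, code) in malformed {
		let idx = updates.iter().position(|u| matches!(
			u, UpdateSketch::Fail { id, data } if *id == mid && data.is_empty()
		)).ok_or(())?;
		updates[idx] = UpdateSketch::FailMalformed { id: mid, code };
	}
	Ok(())
}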
8209 context: ChannelContext {
8212 config: config.unwrap(),
8216 // Note that we don't care about serializing handshake limits as we only ever serialize
8217 // channel data after the handshake has completed.
8218 inbound_handshake_limits_override: None,
8221 temporary_channel_id,
8223 announcement_sigs_state: announcement_sigs_state.unwrap(),
8225 channel_value_satoshis,
8227 latest_monitor_update_id,
8229 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8230 shutdown_scriptpubkey,
8233 cur_holder_commitment_transaction_number,
8234 cur_counterparty_commitment_transaction_number,
8237 holder_max_accepted_htlcs,
8238 pending_inbound_htlcs,
8239 pending_outbound_htlcs,
8240 holding_cell_htlc_updates,
8244 monitor_pending_channel_ready,
8245 monitor_pending_revoke_and_ack,
8246 monitor_pending_commitment_signed,
8247 monitor_pending_forwards,
8248 monitor_pending_failures,
8249 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8251 signer_pending_commitment_update: false,
8252 signer_pending_funding: false,
8255 holding_cell_update_fee,
8256 next_holder_htlc_id,
8257 next_counterparty_htlc_id,
8258 update_time_counter,
8261 #[cfg(debug_assertions)]
8262 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8263 #[cfg(debug_assertions)]
8264 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8266 last_sent_closing_fee: None,
8267 pending_counterparty_closing_signed: None,
8268 expecting_peer_commitment_signed: false,
8269 closing_fee_limits: None,
8270 target_closing_feerate_sats_per_kw,
8272 funding_tx_confirmed_in,
8273 funding_tx_confirmation_height,
8275 channel_creation_height: channel_creation_height.unwrap(),
8277 counterparty_dust_limit_satoshis,
8278 holder_dust_limit_satoshis,
8279 counterparty_max_htlc_value_in_flight_msat,
8280 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8281 counterparty_selected_channel_reserve_satoshis,
8282 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8283 counterparty_htlc_minimum_msat,
8284 holder_htlc_minimum_msat,
8285 counterparty_max_accepted_htlcs,
8288 counterparty_forwarding_info,
8290 channel_transaction_parameters: channel_parameters,
8291 funding_transaction,
8294 counterparty_cur_commitment_point,
8295 counterparty_prev_commitment_point,
8296 counterparty_node_id,
8298 counterparty_shutdown_scriptpubkey,
8302 channel_update_status,
8303 closing_signed_in_flight: false,
8307 #[cfg(any(test, fuzzing))]
8308 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8309 #[cfg(any(test, fuzzing))]
8310 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8312 workaround_lnd_bug_4006: None,
8313 sent_message_awaiting_response: None,
8315 latest_inbound_scid_alias,
8316 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing
8317 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8319 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8320 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8322 #[cfg(any(test, fuzzing))]
8323 historical_inbound_htlc_fulfills,
8325 channel_type: channel_type.unwrap(),
8328 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8337 use bitcoin::blockdata::constants::ChainHash;
8338 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8339 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8340 use bitcoin::blockdata::opcodes;
8341 use bitcoin::network::constants::Network;
8342 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8343 use crate::ln::{PaymentHash, PaymentPreimage};
8344 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8345 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8346 use crate::ln::channel::InitFeatures;
8347 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8348 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8349 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8350 use crate::ln::msgs;
8351 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8352 use crate::ln::script::ShutdownScript;
8353 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8354 use crate::chain::BestBlock;
8355 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8356 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8357 use crate::chain::transaction::OutPoint;
8358 use crate::routing::router::{Path, RouteHop};
8359 use crate::util::config::UserConfig;
8360 use crate::util::errors::APIError;
8361 use crate::util::ser::{ReadableArgs, Writeable};
8362 use crate::util::test_utils;
8363 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8364 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8365 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8366 use bitcoin::secp256k1::{SecretKey,PublicKey};
8367 use bitcoin::hashes::sha256::Hash as Sha256;
8368 use bitcoin::hashes::Hash;
8369 use bitcoin::hashes::hex::FromHex;
8370 use bitcoin::hash_types::WPubkeyHash;
8371 use bitcoin::blockdata::locktime::absolute::LockTime;
8372 use bitcoin::address::{WitnessProgram, WitnessVersion};
8373 use crate::prelude::*;
8376 fn test_channel_state_order() {
8377 use crate::ln::channel::NegotiatingFundingFlags;
8378 use crate::ln::channel::AwaitingChannelReadyFlags;
8379 use crate::ln::channel::ChannelReadyFlags;
8381 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
8382 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
8383 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
8384 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
8387 struct TestFeeEstimator {
8390 impl FeeEstimator for TestFeeEstimator {
8391 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8397 fn test_max_funding_satoshis_no_wumbo() {
8398 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8399 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8400 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8404 signer: InMemorySigner,
8407 impl EntropySource for Keys {
8408 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8411 impl SignerProvider for Keys {
8412 type EcdsaSigner = InMemorySigner;
8414 type TaprootSigner = InMemorySigner;
8416 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8417 self.signer.channel_keys_id()
8420 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8424 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8426 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8427 let secp_ctx = Secp256k1::signing_only();
8428 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8429 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8430 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8433 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8434 let secp_ctx = Secp256k1::signing_only();
8435 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8436 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8440 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8441 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8442 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8446 fn upfront_shutdown_script_incompatibility() {
8447 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8448 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8449 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8452 let seed = [42; 32];
8453 let network = Network::Testnet;
8454 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8455 keys_provider.expect(OnGetShutdownScriptpubkey {
8456 returns: non_v0_segwit_shutdown_script.clone(),
8459 let secp_ctx = Secp256k1::new();
8460 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8461 let config = UserConfig::default();
8462 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8463 Err(APIError::IncompatibleShutdownScript { script }) => {
8464 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8466 Err(e) => panic!("Unexpected error: {:?}", e),
8467 Ok(_) => panic!("Expected error"),
8471 // Check that, during channel creation, we use the same feerate in the open channel message
8472 // as we do in the Channel object creation itself.
8474 fn test_open_channel_msg_fee() {
8475 let original_fee = 253;
8476 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8477 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8478 let secp_ctx = Secp256k1::new();
8479 let seed = [42; 32];
8480 let network = Network::Testnet;
8481 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8483 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8484 let config = UserConfig::default();
8485 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8487 // Now change the fee so we can check that the fee in the open_channel message is the
8488 // same as the old fee.
8489 fee_est.fee_est = 500;
8490 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8491 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8495 fn test_holder_vs_counterparty_dust_limit() {
8496 // Test that when calculating the local and remote commitment transaction fees, the correct
8497 // dust limits are used.
8498 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8499 let secp_ctx = Secp256k1::new();
8500 let seed = [42; 32];
8501 let network = Network::Testnet;
8502 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8503 let logger = test_utils::TestLogger::new();
8504 let best_block = BestBlock::from_network(network);
8506 // Go through the flow of opening a channel between two nodes, making sure
8507 // they have different dust limits.
8509 // Create Node A's channel pointing to Node B's pubkey
8510 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8511 let config = UserConfig::default();
8512 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8514 // Create Node B's channel by receiving Node A's open_channel message
8515 // Make sure A's dust limit is as we expect.
8516 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8517 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8518 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8520 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8521 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8522 accept_channel_msg.dust_limit_satoshis = 546;
8523 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8524 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8526 // Node A --> Node B: funding created
8527 let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
	value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8532 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8533 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8535 // Node B --> Node A: funding signed
8536 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8537 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8539 // Put some inbound and outbound HTLCs in A's channel.
8540 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
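// How 11_092_000 msat threads the needle, assuming the BOLT 3 non-anchor second-stage
// weights of 663 WU (HTLC-timeout) and 703 WU (HTLC-success) at 15_000 sat/kW:
//   A (dust limit 1_560 sat): thresholds are 1_560 + 9_945 = 11_505 sat (offered) and
//     1_560 + 10_545 = 12_105 sat (received), so 11_092 sat is dust on A's commitment.
//   B (dust limit 546 sat): thresholds are 546 + 9_945 = 10_491 sat and
//     546 + 10_545 = 11_091 sat, so 11_092 sat is non-dust on B's commitment.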
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
	htlc_id: 0,
	amount_msat: htlc_amount_msat,
	payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
	cltv_expiry: 300000000,
	state: InboundHTLCState::Committed,
});
node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
	htlc_id: 1,
	amount_msat: htlc_amount_msat, // put an amount below A's effective dust limit but above B's.
	payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
	cltv_expiry: 200000000,
	state: OutboundHTLCState::Committed,
	source: HTLCSource::OutboundRoute {
		path: Path { hops: Vec::new(), blinded_tail: None },
		session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
		first_hop_htlc_msat: 548,
		payment_id: PaymentId([42; 32]),
	},
	skimmed_fee_msat: None,
	blinding_point: None,
});
8565 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8566 // the dust limit check.
8567 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8568 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8569 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8570 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8572 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8573 // of the HTLCs are seen to be above the dust limit.
8574 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8575 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8576 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8577 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8578 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8582 fn test_timeout_vs_success_htlc_dust_limit() {
8583 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8584 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8585 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8586 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8587 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8588 let secp_ctx = Secp256k1::new();
8589 let seed = [42; 32];
8590 let network = Network::Testnet;
8591 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8593 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8594 let config = UserConfig::default();
8595 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8597 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8598 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8600 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8601 // counted as dust when it shouldn't be.
8602 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8603 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8604 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8605 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8607 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8608 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8609 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8610 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8611 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8613 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8615 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8616 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8617 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8618 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8619 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8621 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8622 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8623 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8624 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8625 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
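// A minimal standalone sketch (hypothetical, not part of the original test suite) of the
// trimming rule the two tests above exercise: an HTLC is dust on a commitment transaction
// when its value cannot pay for its second-stage transaction and still clear the
// broadcaster's dust limit. Offered HTLCs price in the HTLC-timeout weight, received
// HTLCs the HTLC-success weight.
#[allow(dead_code)]
fn example_dust_threshold_sats(dust_limit_sats: u64, feerate_per_kw: u64, second_stage_weight: u64) -> u64 {
	// Second-stage fee as in BOLT 3: weight * feerate / 1000, rounded down.
	dust_limit_sats + feerate_per_kw * second_stage_weight / 1000
}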
8629 fn channel_reestablish_no_updates() {
8630 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8631 let logger = test_utils::TestLogger::new();
8632 let secp_ctx = Secp256k1::new();
8633 let seed = [42; 32];
8634 let network = Network::Testnet;
8635 let best_block = BestBlock::from_network(network);
8636 let chain_hash = ChainHash::using_genesis_block(network);
8637 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8639 // Go through the flow of opening a channel between two nodes.
8641 // Create Node A's channel pointing to Node B's pubkey
8642 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8643 let config = UserConfig::default();
8644 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8646 // Create Node B's channel by receiving Node A's open_channel message
8647 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8648 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8649 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8651 // Node B --> Node A: accept channel
8652 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8653 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8655 // Node A --> Node B: funding created
8656 let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
	value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8661 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8662 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8664 // Node B --> Node A: funding signed
8665 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8666 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8668 // Now disconnect the two nodes and check that the commitment point in
8669 // Node B's channel_reestablish message is sane.
8670 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8671 let msg = node_b_chan.get_channel_reestablish(&&logger);
8672 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8673 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8674 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Check that the commitment point in Node A's channel_reestablish message is sane.
8678 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8679 let msg = node_a_chan.get_channel_reestablish(&&logger);
8680 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8681 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8682 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
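// Both reestablish messages look identical because neither side exchanged a commitment
// update after funding: each expects commitment number 1 next, and with no revocations
// received the last per-commitment secret is reported as all zeroes.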
8686 fn test_configured_holder_max_htlc_value_in_flight() {
8687 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8688 let logger = test_utils::TestLogger::new();
8689 let secp_ctx = Secp256k1::new();
8690 let seed = [42; 32];
8691 let network = Network::Testnet;
8692 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8693 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8694 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8696 let mut config_2_percent = UserConfig::default();
8697 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8698 let mut config_99_percent = UserConfig::default();
8699 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8700 let mut config_0_percent = UserConfig::default();
8701 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8702 let mut config_101_percent = UserConfig::default();
8703 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8705 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8706 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8707 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8708 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8709 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8710 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8712 // Test with the upper bound - 1 of valid values (99%).
8713 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8714 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8715 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8717 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8719 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8720 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
8722 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8723 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8724 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8726 // Test with the upper bound - 1 of valid values (99%).
8727 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8728 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8729 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8731 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8732 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8733 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8734 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8735 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8737 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8740 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8741 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8742 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8744 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8745 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8746 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8747 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8748 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8750 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8753 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8754 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8755 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
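// Summary of the above: the configured percentage is effectively clamped to [1, 100].
// For these 10_000_000 sat (10_000_000_000 msat) channels, 2% allows 200_000_000 msat
// in flight and 99% allows 9_900_000_000 msat.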
8759 fn test_configured_holder_selected_channel_reserve_satoshis() {
8761 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8762 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8763 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
// Test with valid but unreasonably high channel reserves:
// the requesting and accepting parties ask for 49%-49% and 60%-30% channel reserves.
8767 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8768 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
// Test with a calculated channel reserve below the lower bound,
// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8772 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
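// (0.002% of 100_000 sat is only 2 sat, so the reserve is floored at
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`, assumed here to be 1_000 sat.)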
// Test with invalid channel reserves, since the sum of both is greater than or equal
// to the channel value.
8776 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8777 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
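// Reserves summing to 100% or more would leave no spendable balance on either side, so
// the inbound side is expected to reject the open (asserted in the helper below).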
8780 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8781 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8782 let logger = test_utils::TestLogger::new();
8783 let secp_ctx = Secp256k1::new();
8784 let seed = [42; 32];
8785 let network = Network::Testnet;
8786 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8787 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8788 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8791 let mut outbound_node_config = UserConfig::default();
8792 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8793 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8795 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8796 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8798 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8799 let mut inbound_node_config = UserConfig::default();
8800 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8802 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8803 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8805 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8807 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8808 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
	// Channel negotiation should fail here.
8811 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8812 assert!(result.is_err());
8817 fn channel_update() {
8818 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8819 let logger = test_utils::TestLogger::new();
8820 let secp_ctx = Secp256k1::new();
8821 let seed = [42; 32];
8822 let network = Network::Testnet;
8823 let best_block = BestBlock::from_network(network);
8824 let chain_hash = ChainHash::using_genesis_block(network);
8825 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8827 // Create Node A's channel pointing to Node B's pubkey
8828 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8829 let config = UserConfig::default();
8830 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8832 // Create Node B's channel by receiving Node A's open_channel message
8833 // Make sure A's dust limit is as we expect.
8834 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8835 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8836 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8838 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8839 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8840 accept_channel_msg.dust_limit_satoshis = 546;
8841 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8842 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8844 // Node A --> Node B: funding created
8845 let output_script = node_a_chan.context.get_funding_redeemscript();
let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
	value: 10000000, script_pubkey: output_script.clone(),
}]};
let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8850 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8851 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8853 // Node B --> Node A: funding signed
8854 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8855 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8857 // Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
	contents: UnsignedChannelUpdate {
		chain_hash,
		short_channel_id: 0,
		timestamp: 0,
		flags: 0,
		cltv_expiry_delta: 100,
		htlc_minimum_msat: 5,
		htlc_maximum_msat: MAX_VALUE_MSAT,
		fee_base_msat: 110,
		fee_proportional_millionths: 11,
		excess_data: Vec::new(),
	},
	signature: Signature::from(unsafe { FFISignature::new() })
};
assert!(node_a_chan.channel_update(&update).unwrap());
8875 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8876 // change our official htlc_minimum_msat.
8877 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
	Some(info) => {
		assert_eq!(info.cltv_expiry_delta, 100);
		assert_eq!(info.fee_base_msat, 110);
		assert_eq!(info.fee_proportional_millionths, 11);
	},
	None => panic!("expected counterparty forwarding info to be Some")
}

// Applying the same update a second time should report that nothing changed.
assert!(!node_a_chan.channel_update(&update).unwrap());
8891 fn blinding_point_skimmed_fee_malformed_ser() {
// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
// properly.
8894 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8895 let secp_ctx = Secp256k1::new();
8896 let seed = [42; 32];
8897 let network = Network::Testnet;
8898 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8900 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8901 let config = UserConfig::default();
8902 let features = channelmanager::provided_init_features(&config);
8903 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8904 let mut chan = Channel { context: outbound_chan.context };
let dummy_htlc_source = HTLCSource::OutboundRoute {
	path: Path {
		hops: vec![RouteHop {
			pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
			node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
			cltv_expiry_delta: 0, maybe_announced_channel: false,
		}],
		blinded_tail: None
	},
	session_priv: test_utils::privkey(42),
	first_hop_htlc_msat: 0,
	payment_id: PaymentId([42; 32]),
};
let dummy_outbound_output = OutboundHTLCOutput {
	htlc_id: 0,
	amount_msat: 0,
	payment_hash: PaymentHash([43; 32]),
	cltv_expiry: 0,
	state: OutboundHTLCState::Committed,
	source: dummy_htlc_source.clone(),
	skimmed_fee_msat: None,
	blinding_point: None,
};
let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
	if idx % 2 == 0 {
		htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
	}
	if idx % 3 == 0 {
		htlc.skimmed_fee_msat = Some(1);
	}
}
chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
	amount_msat: 0,
	cltv_expiry: 0,
	payment_hash: PaymentHash([43; 32]),
	source: dummy_htlc_source.clone(),
	onion_routing_packet: msgs::OnionPacket {
		version: 0,
		public_key: Ok(test_utils::pubkey(1)),
		hop_data: [0; 20*65],
		hmac: [0; 32]
	},
	skimmed_fee_msat: None,
	blinding_point: None,
};
let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
	payment_preimage: PaymentPreimage([42; 32]),
	htlc_id: 0,
};
let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
	htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
};
let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
	htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
};
let mut holding_cell_htlc_updates = Vec::with_capacity(12);
for i in 0..12 {
	if i % 5 == 0 {
		holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
	} else if i % 5 == 1 {
		holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
	} else if i % 5 == 2 {
		let mut dummy_add = dummy_holding_cell_add_htlc.clone();
		if let HTLCUpdateAwaitingACK::AddHTLC {
			ref mut blinding_point, ref mut skimmed_fee_msat, ..
		} = &mut dummy_add {
			*blinding_point = Some(test_utils::pubkey(42 + i));
			*skimmed_fee_msat = Some(42);
		} else { panic!() }
		holding_cell_htlc_updates.push(dummy_add);
	} else if i % 5 == 3 {
		holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
	} else {
		holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
	}
}
chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8987 // Encode and decode the channel and ensure that the HTLCs within are the same.
8988 let encoded_chan = chan.encode();
8989 let mut s = crate::io::Cursor::new(&encoded_chan);
8990 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8991 let features = channelmanager::provided_channel_type_features(&config);
8992 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8993 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8994 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
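// The round-trip above specifically covers the later-added odd TLV fields (HTLC blinding
// points, skimmed fees, and malformed-HTLC holding-cell updates), ensuring they survive
// encode/decode unchanged.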
8997 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8999 fn outbound_commitment_test() {
9000 use bitcoin::sighash;
9001 use bitcoin::consensus::encode::serialize;
9002 use bitcoin::sighash::EcdsaSighashType;
9003 use bitcoin::hashes::hex::FromHex;
9004 use bitcoin::hash_types::Txid;
9005 use bitcoin::secp256k1::Message;
9006 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9007 use crate::ln::PaymentPreimage;
9008 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9009 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9010 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9011 use crate::util::logger::Logger;
9012 use crate::sync::Arc;
9013 use core::str::FromStr;
9014 use hex::DisplayHex;
9016 // Test vectors from BOLT 3 Appendices C and F (anchors):
9017 let feeest = TestFeeEstimator{fee_est: 15000};
9018 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9019 let secp_ctx = Secp256k1::new();
let mut signer = InMemorySigner::new(
	&secp_ctx,
9023 SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9024 SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9025 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9026 SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9027 SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9029 // These aren't set in the test vectors:
	[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
	10_000_000,
	[0; 32],
	[0; 32],
);
9036 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9037 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9038 let keys_provider = Keys { signer: signer.clone() };
9040 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9041 let mut config = UserConfig::default();
9042 config.channel_handshake_config.announced_channel = false;
9043 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9044 chan.context.holder_dust_limit_satoshis = 546;
9045 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9047 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9049 let counterparty_pubkeys = ChannelPublicKeys {
9050 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9051 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9052 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9053 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
	htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
chan.context.channel_transaction_parameters.counterparty_parameters = Some(
	CounterpartyChannelTransactionParameters {
		pubkeys: counterparty_pubkeys.clone(),
		selected_contest_delay: 144
	});
chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9062 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9064 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9065 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9067 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9068 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9070 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9071 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9073 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9074 // derived from a commitment_seed, so instead we copy it here and call
9075 // build_commitment_transaction.
9076 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9077 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9078 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9079 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9080 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
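// `TxCreationKeys::derive_new` mixes both parties' basepoints with the per-commitment
// point, so each commitment transaction is signed under fresh, commitment-specific keys.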
macro_rules! test_commitment {
	( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
		chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
	};
}
macro_rules! test_commitment_with_anchors {
	( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
		chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
	};
}
9096 macro_rules! test_commitment_common {
9097 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
	$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
9100 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9101 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9103 let htlcs = commitment_stats.htlcs_included.drain(..)
		.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
		.collect();
	(commitment_stats.tx, htlcs)
};
9108 let trusted_tx = commitment_tx.trust();
9109 let unsigned_tx = trusted_tx.built_transaction();
9110 let redeemscript = chan.context.get_funding_redeemscript();
9111 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9112 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9113 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9114 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9116 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9117 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9118 let mut counterparty_htlc_sigs = Vec::new();
9119 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
$({
	let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9122 per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
	counterparty_htlc_sigs.push(remote_signature);
})*
9125 assert_eq!(htlcs.len(), per_htlc.len());
9127 let holder_commitment_tx = HolderCommitmentTransaction::new(
9128 commitment_tx.clone(),
9129 counterparty_signature,
9130 counterparty_htlc_sigs,
9131 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
	chan.context.counterparty_funding_pubkey()
);
9134 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9135 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9137 let funding_redeemscript = chan.context.get_funding_redeemscript();
9138 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9139 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9141 // ((htlc, counterparty_sig), (index, holder_sig))
9142 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
$({
	log_trace!(logger, "verifying htlc {}", $htlc_idx);
9146 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9148 let ref htlc = htlcs[$htlc_idx];
9149 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9150 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9151 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9152 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9153 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9154 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9155 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
	for i in 0..5 {
		let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
		if out == htlc.payment_hash {
			preimage = Some(PaymentPreimage([i; 32]));
		}
	}
	assert!(preimage.is_some());
}
9169 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
	channel_derivation_parameters: ChannelDerivationParameters {
		value_satoshis: chan.context.channel_value_satoshis,
		keys_id: chan.context.channel_keys_id,
		transaction_parameters: chan.context.channel_transaction_parameters.clone(),
	},
	commitment_txid: trusted_tx.txid(),
	per_commitment_number: trusted_tx.commitment_number(),
	per_commitment_point: trusted_tx.per_commitment_point(),
	feerate_per_kw: trusted_tx.feerate_per_kw(),
	htlc: htlc.clone(),
	preimage: preimage.clone(),
	counterparty_sig: *htlc_counterparty_sig,
}, &secp_ctx).unwrap();
9184 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9185 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9187 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9188 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9189 let trusted_tx = holder_commitment_tx.trust();
9190 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9191 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
	assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
})*
assert!(htlc_counterparty_sig_iter.next().is_none());
} }
}
9198 // anchors: simple commitment tx with no HTLCs and single anchor
9199 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9200 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9201 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9203 // simple commitment tx with no HTLCs
9204 chan.context.value_to_self_msat = 7000000000;
9206 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9207 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9208 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9210 // anchors: simple commitment tx with no HTLCs
9211 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9212 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9213 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 0,
		amount_msat: 1000000,
		cltv_expiry: 500,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
	out
});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 1,
		amount_msat: 2000000,
		cltv_expiry: 501,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 2,
		amount_msat: 2000000,
		cltv_expiry: 502,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
	out
});
chan.context.pending_outbound_htlcs.push({
	let mut out = OutboundHTLCOutput{
		htlc_id: 3,
		amount_msat: 3000000,
		cltv_expiry: 503,
		payment_hash: PaymentHash([0; 32]),
		state: OutboundHTLCState::Committed,
		source: HTLCSource::dummy(),
		skimmed_fee_msat: None,
		blinding_point: None,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
	out
});
chan.context.pending_inbound_htlcs.push({
	let mut out = InboundHTLCOutput{
		htlc_id: 4,
		amount_msat: 4000000,
		cltv_expiry: 504,
		payment_hash: PaymentHash([0; 32]),
		state: InboundHTLCState::Committed,
	};
	out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
	out
});
9277 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9278 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9279 chan.context.feerate_per_kw = 0;
9281 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9282 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9283 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9286 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9287 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9288 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9291 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9292 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9293 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9296 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9297 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9298 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9301 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9302 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9303 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9306 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9307 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9308 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9311 // commitment tx with seven outputs untrimmed (maximum feerate)
9312 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9313 chan.context.feerate_per_kw = 647;
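// 647 sat/kWU is the highest feerate at which all five HTLCs clear the trim
// threshold with the default 546 sat dust limit, keeping seven outputs (five
// HTLCs plus to_local and to_remote). The six-output case below sits one
// sat/kWU past that boundary; a short sketch of the trimming arithmetic
// accompanies it.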
9315 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9316 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9317 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9320 "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9321 "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9322 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9325 "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9326 "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9327 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9330 "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9331 "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9332 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9335 "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9336 "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9337 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9340 "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9341 "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9342 "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9345 // commitment tx with six outputs untrimmed (minimum feerate)
9346 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9347 chan.context.feerate_per_kw = 648;
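// Illustrative sketch (hypothetical helper, not part of the upstream test):
// per BOLT 3, an HTLC output is trimmed from the commitment tx when its
// value, less the fee for its second-stage claim tx at the current feerate,
// falls below the dust limit. The weights below are the non-anchor figures
// (663 WU for HTLC-timeout on offered HTLCs, 703 WU for HTLC-success on
// received ones).
fn is_htlc_trimmed(offered: bool, amount_sat: u64, feerate_per_kw: u64, dust_limit_sat: u64) -> bool {
    let claim_tx_weight: u64 = if offered { 663 } else { 703 };
    // Second-stage claim tx fee in satoshis, rounded down.
    let claim_tx_fee_sat = feerate_per_kw * claim_tx_weight / 1000;
    amount_sat < dust_limit_sat + claim_tx_fee_sat
}
// The 1000 sat received HTLC is kept at 647 (546 + 703 * 647 / 1000 = 1000,
// which the HTLC value still matches) and trimmed at 648 (546 + 703 * 648 /
// 1000 = 1001), which is exactly the seven-to-six-output transition these
// two vectors exercise.
debug_assert!(!is_htlc_trimmed(false, 1000, 647, 546));
debug_assert!(is_htlc_trimmed(false, 1000, 648, 546));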
9349 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9350 "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9351 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9354 "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9355 "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9356 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9359 "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9360 "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9361 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9364 "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9365 "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9366 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9369 "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9370 "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9371 "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9374 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9375 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9376 chan.context.feerate_per_kw = 645;
9377 chan.context.holder_dust_limit_satoshis = 1001;
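// In the `anchors_zero_htlc_fee_and_dependencies` vectors the second-stage
// HTLC txs pay zero fee, so an HTLC output is trimmed simply when its value
// is below the dust limit. Raising the dust limit to 1001 sat therefore
// drops the 1000 sat HTLC even at this low feerate, and the commitment also
// carries the two 330 sat anchor outputs on top of the six counted above.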
9379 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9380 "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9381 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9384 "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9385 "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9386 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9389 "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9390 "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9391 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9394 "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9395 "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9396 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9399 "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9400 "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9401 "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9404 // commitment tx with six outputs untrimmed (maximum feerate)
9405 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9406 chan.context.feerate_per_kw = 2069;
9407 chan.context.holder_dust_limit_satoshis = 546;
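// Back on the non-anchor path with the dust limit restored to the BOLT 3
// default of 546 sat: 2069 sat/kWU is the highest feerate at which the
// 2000 sat received HTLC still clears the success-path trim threshold
// (546 + 703 * 2069 / 1000 = 2000), so six outputs remain.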
9409 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9410 "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9411 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9414 "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9415 "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9416 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9419 "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9420 "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9421 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9424 "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9425 "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9426 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9429 "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9430 "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9431 "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9434 // commitment tx with five outputs untrimmed (minimum feerate)
9435 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9436 chan.context.feerate_per_kw = 2070;
9438 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9439 "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9440 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9443 "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9444 "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9445 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9448 "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9449 "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9450 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9453 "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9454 "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9455 "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9458 // commitment tx with five outputs untrimmed (maximum feerate)
9459 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9460 chan.context.feerate_per_kw = 2194;
9462 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9463 "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9464 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9467 "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9468 "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9469 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9472 "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9473 "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9474 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9477 "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9478 "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9479 "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9482 // commitment tx with four outputs untrimmed (minimum feerate)
9483 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9484 chan.context.feerate_per_kw = 2195;
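// Offered HTLCs are trimmed against the lighter 663 WU HTLC-timeout weight,
// so they survive to a higher feerate than received HTLCs of the same value:
// the 2000 sat offered HTLC is kept at 2194 (546 + 663 * 2194 / 1000 = 2000)
// but trimmed at 2195, taking the commitment from five outputs to four.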
9486 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9487 "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9488 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9491 "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9492 "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9493 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9496 "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9497 "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9498 "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9501 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9502 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9503 chan.context.feerate_per_kw = 2185;
9504 chan.context.holder_dust_limit_satoshis = 2001;
9505 let cached_channel_type = chan.context.channel_type.clone();
9506 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
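// The non-anchor channel type is cached just above and restored (via
// `cached_channel_type.clone()`) before each following non-anchor vector, so
// the anchors cases can flip the type without disturbing the rest of the
// sequence.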
9508 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9509 "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9510 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9513 "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9514 "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9515 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9518 "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9519 "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9520 "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9523 // commitment tx with four outputs untrimmed (maximum feerate)
9524 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9525 chan.context.feerate_per_kw = 3702;
9526 chan.context.holder_dust_limit_satoshis = 546;
9527 chan.context.channel_type = cached_channel_type.clone();
9529 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9530 "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9531 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9534 "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9535 "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9536 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9539 "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9540 "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9541 "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9544 // commitment tx with three outputs untrimmed (minimum feerate)
9545 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9546 chan.context.feerate_per_kw = 3703;
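// The 3000 sat offered HTLC sits exactly at the timeout-path threshold at
// 3702 (546 + 663 * 3702 / 1000 = 3000) and is trimmed here at 3703, leaving
// just the 4000 sat HTLC plus the two balance outputs.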
9548 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9549 "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9550 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9553 "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9554 "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9555 "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9558 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9559 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9560 chan.context.feerate_per_kw = 3687;
9561 chan.context.holder_dust_limit_satoshis = 3001;
9562 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
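// With zero-fee HTLC txs the 3001 sat dust limit now trims the 1000 sat,
// both 2000 sat, and the 3000 sat HTLCs, leaving only the 4000 sat HTLC
// alongside the balance and anchor outputs.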
9564 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9565 "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9566 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9569 "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9570 "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9571 "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9574 // commitment tx with three outputs untrimmed (maximum feerate)
9575 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9576 chan.context.feerate_per_kw = 4914;
9577 chan.context.holder_dust_limit_satoshis = 546;
9578 chan.context.channel_type = cached_channel_type.clone();
9580 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9581 "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9582 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9585 "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9586 "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9587 "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9590 // commitment tx with two outputs untrimmed (minimum feerate)
9591 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9592 chan.context.feerate_per_kw = 4915;
9593 chan.context.holder_dust_limit_satoshis = 546;
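// From 4915 sat/kWU upwards every HTLC is trimmed (even the 4000 sat HTLC
// fails 546 + 703 * 4915 / 1000 = 4001), so the remaining vectors pass an
// empty HTLC block (`{}`): only to_local and to_remote are left to check.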
9595 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9596 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9597 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9599 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9600 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9601 chan.context.feerate_per_kw = 4894;
9602 chan.context.holder_dust_limit_satoshis = 4001;
9603 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9605 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9606 "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9607 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with two outputs untrimmed (maximum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651180;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type.clone();
	test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
		"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with one output untrimmed (minimum feerate)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651181;
	test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// anchors: commitment tx with one output untrimmed (minimum dust limit)
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 6216010;
	chan.context.holder_dust_limit_satoshis = 4001;
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
	// commitment tx with fee greater than funder amount
	chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
	chan.context.feerate_per_kw = 9651936;
	chan.context.holder_dust_limit_satoshis = 546;
	chan.context.channel_type = cached_channel_type;
	test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
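	// Note that the signatures and transaction above are identical to the
	// "one output untrimmed (minimum feerate)" case: once the computed commitment
	// fee exceeds what the funder can pay, BOLT 3 caps it at the funder's full
	// balance, so any feerate past that point yields the same commitment tx.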
	// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
	chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
	chan.context.feerate_per_kw = 253;
	chan.context.pending_inbound_htlcs.clear();
	chan.context.pending_inbound_htlcs.push({
		let mut out = InboundHTLCOutput{
			htlc_id: 1,
			amount_msat: 2000000,
			cltv_expiry: 501,
			payment_hash: PaymentHash([0; 32]),
			state: InboundHTLCState::Committed,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.clear();
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 6,
			amount_msat: 5000001,
			cltv_expiry: 506,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
		out
	});
	chan.context.pending_outbound_htlcs.push({
		let mut out = OutboundHTLCOutput{
			htlc_id: 5,
			amount_msat: 5000000,
			cltv_expiry: 505,
			payment_hash: PaymentHash([0; 32]),
			state: OutboundHTLCState::Committed,
			source: HTLCSource::dummy(),
			skimmed_fee_msat: None,
			blinding_point: None,
		};
		out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
		out
	});
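	// The BOLT 3 vectors define each HTLC by its payment *preimage*; the hash that
	// ends up in the commitment script is simply SHA256(preimage), which is what
	// the assignments above compute. For the two offered HTLCs, for example
	// (illustrative only):
	//
	//   let preimage = [5u8; 32]; // == 0x0505...05
	//   let payment_hash = PaymentHash(Sha256::hash(&preimage).to_byte_array());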
	test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
		"304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
		"3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

		{ 1,
		"3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
		"3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },

		{ 2,
		"30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
		"304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
		"020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
	});
	chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
	test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		{ 1,
		"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

		{ 2,
		"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
	});
}

#[test]
fn test_per_commitment_secret_gen() {
	// Test vectors from BOLT 3 Appendix D:
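	// BOLT 3 derives the secret for a 48-bit index by walking the index from its
	// most significant bit down: wherever a bit is set, that bit position is
	// flipped in the running 32-byte value and the result is re-hashed with
	// SHA256. A minimal sketch of the loop (mirroring what
	// chan_utils::build_commitment_secret is expected to do; `idx` is the
	// commitment index, shown for illustration only):
	//
	//   let mut res = seed;
	//   for bitpos in (0..48).rev() {
	//       if idx & (1 << bitpos) != 0 {
	//           res[bitpos / 8] ^= 1 << (bitpos & 7);
	//           res = Sha256::hash(&res).to_byte_array();
	//       }
	//   }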
	let mut seed = [0; 32];
	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
		<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
		<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

	assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
		<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

	seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
	assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
		<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
}

#[test]
fn test_key_derivation() {
	// Test vectors from BOLT 3 Appendix E:
	let secp_ctx = Secp256k1::new();
	let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
	let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
	let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
	assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
	assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
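	// BOLT 3 Appendix E keys combine a basepoint with the per-commitment point.
	// For ordinary derived keys:
	//     pubkey  = basepoint + SHA256(per_commitment_point || basepoint) * G
	//     privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
	// The revocation key instead blinds both parties' contributions:
	//     revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
	//                      + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
	// The assertions below check LDK's implementations of these formulas against
	// the spec's vectors.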
	assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

	assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
		<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

	assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
		SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
}

#[test]
fn test_zero_conf_channel_type_support() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
	let logger = test_utils::TestLogger::new();

	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let config = UserConfig::default();
	let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

	let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
	channel_type_features.set_zero_conf_required();

	let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(channel_type_features);
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
		node_b_node_id, &channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
	assert!(res.is_ok());
}

#[test]
fn test_supports_anchors_zero_htlc_tx_fee() {
	// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
	// resulting `channel_type`.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let mut config = UserConfig::default();
	config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
	// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
	// sides need to signal it.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
		&config, 0, 42, None
	).unwrap();
	assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
	let mut expected_channel_type = ChannelTypeFeatures::empty();
	expected_channel_type.set_static_remote_key_required();
	expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();
	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	assert_eq!(channel_a.context.channel_type, expected_channel_type);
	assert_eq!(channel_b.context.channel_type, expected_channel_type);
}

#[test]
fn test_rejects_implicit_simple_anchors() {
	// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
	// each side's `InitFeatures`, it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();
	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let raw_init_features = static_remote_key_required | simple_anchors_required;
	let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
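	// Per BOLT 9, feature bits come in even/odd pairs: the even bit of a pair means
	// the feature is required, the odd bit that it is optional. Bit 12 is
	// `option_static_remote_key` (required) and bit 20 is `option_anchors`
	// (required), so the raw features above advertise both as mandatory:
	//
	//   assert_eq!(raw_init_features, (1 << 12) | (1 << 20));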
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	// Set `channel_type` to `None` to force the implicit feature negotiation.
	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = None;
	// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
	// `static_remote_key`, it will fail the channel.
	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(channel_b.is_err());
}

#[test]
fn test_rejects_simple_anchors_channel_type() {
	// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
	// it is rejected.
	let secp_ctx = Secp256k1::new();
	let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let network = Network::Testnet;
	let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
	let logger = test_utils::TestLogger::new();

	let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
	let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

	let config = UserConfig::default();

	// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
	let static_remote_key_required: u64 = 1 << 12;
	let simple_anchors_required: u64 = 1 << 20;
	let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
	let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
	assert!(!simple_anchors_init.requires_unknown_bits());
	assert!(!simple_anchors_channel_type.requires_unknown_bits());
	// First, we'll try to open a channel between A and B where A requests a channel type for
	// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
	// B as it's not supported by LDK.
	let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
		&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
		None
	).unwrap();

	let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
	open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
	let res = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	);
	assert!(res.is_err());
	// Then, we'll try to open another channel where A requests a channel type for
	// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
	// original `option_anchors` feature, which should be rejected by A as it's not supported by
	// LDK.
	let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
		10000000, 100000, 42, &config, 0, 42, None
	).unwrap();
	let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

	let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
		&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
		&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
	).unwrap();

	let mut accept_channel_msg = channel_b.get_accept_channel_message();
	accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

	let res = channel_a.accept_channel(
		&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
	);
	assert!(res.is_err());
}

#[test]
fn test_waiting_for_batch() {
	let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
	let logger = test_utils::TestLogger::new();
	let secp_ctx = Secp256k1::new();
	let seed = [42; 32];
	let network = Network::Testnet;
	let best_block = BestBlock::from_network(network);
	let chain_hash = ChainHash::using_genesis_block(network);
	let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

	let mut config = UserConfig::default();
	// Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
	// channel in a batch before all channels are ready.
	config.channel_handshake_limits.trust_own_funding_0conf = true;
	// Create a channel from node a to node b that will be part of batch funding.
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
	let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
		&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
		&channelmanager::provided_init_features(&config),
		10000000, 100000, 42, &config, 0, 42, None
	).unwrap();
	let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
	let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
	let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
		&feeest, &&keys_provider, &&keys_provider, node_b_node_id,
		&channelmanager::provided_channel_type_features(&config),
		&channelmanager::provided_init_features(&config),
		&open_channel_msg, 7, &config, 0, &&logger,
		true, // Allow node b to send a 0conf channel_ready.
	).unwrap();
	let accept_channel_msg = node_b_chan.accept_inbound_channel();
	node_a_chan.accept_channel(
		&accept_channel_msg,
		&config.channel_handshake_limits,
		&channelmanager::provided_init_features(&config),
	).unwrap();
	// Fund the channel with a batch funding transaction.
	let output_script = node_a_chan.context.get_funding_redeemscript();
	let tx = Transaction {
		version: 2, lock_time: LockTime::ZERO, input: Vec::new(),
		output: vec![
			TxOut { value: 10000000, script_pubkey: output_script.clone() },
			TxOut { value: 10000000, script_pubkey: Builder::new().into_script() },
		]};
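	// The transaction above mimics a batch funding transaction: output 0 is this
	// channel's funding output, while the second, empty-script output stands in
	// for another channel's funding output in the same batch.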
	let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
	let funding_created_msg = node_a_chan.get_funding_created(
		tx.clone(), funding_outpoint, true, &&logger,
	).map_err(|_| ()).unwrap();
	let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
		&funding_created_msg.unwrap(),
		best_block,
		&&keys_provider,
		&&logger,
	).map_err(|_| ()).unwrap();
	let node_b_updates = node_b_chan.monitor_updating_restored(
		&&logger, &&keys_provider, chain_hash, &config, 0,
	);

	// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
	// broadcasting the funding transaction until the batch is ready.
	let res = node_a_chan.funding_signed(
		&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
	);
	let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
	let node_a_updates = node_a_chan.monitor_updating_restored(
		&&logger, &&keys_provider, chain_hash, &config, 0,
	);
	// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
	// as the funding transaction depends on all channels in the batch becoming ready.
	assert!(node_a_updates.channel_ready.is_none());
	assert!(node_a_updates.funding_broadcastable.is_none());
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
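	// `AwaitingChannelReadyFlags` is a bitset, so WAITING_FOR_BATCH can coexist
	// with THEIR_CHANNEL_READY below: the counterparty's channel_ready is recorded
	// even while our own channel_ready is withheld for the batch.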
	// It is possible to receive a 0conf channel_ready from the remote node.
	node_a_chan.channel_ready(
		&node_b_updates.channel_ready.unwrap(),
		&&keys_provider, chain_hash, &config, &best_block, &&logger,
	).unwrap();
	assert_eq!(
		node_a_chan.context.channel_state,
		ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
	);
	// Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
	node_a_chan.set_batch_ready();
	assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
	assert!(node_a_chan.check_get_channel_ready(0).is_some());
}