// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;
use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}
pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`.
	Outbound,
}
enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}
enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// and these are provided one at a time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc) --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc) --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	///
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}
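
// Editor's sketch (illustrative, not normative): the typical lifecycle of an
// inbound HTLC through the states documented above, assuming no disconnects:
//
//   recv update_add_htlc                      -> RemoteAnnounced
//   recv commitment_signed, send RAA          -> AwaitingRemoteRevokeToAnnounce
//   send our commitment_signed                -> AwaitingAnnouncedRemoteRevoke
//   recv their revoke_and_ack                 -> Committed
//   send update_fulfill/fail_htlc + CS        -> LocalRemoved(..)
//   recv their revoke_and_ack                 -> dropped from pending_inbound_htlcs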
struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	/// * they've revoked, so worst case we can announce an old state and get our (option on)
	///   money back (though we won't), and,
	/// * we'll send them a revoke when they send a commitment_signed, and since only they're
	///   allowed to remove it, the "can only be removed once committed on both sides" requirement
	///   doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///   we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit.
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}
#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}
impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r),
		}
	}
}
impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r),
		}
	}
}
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}
/// See AwaitingRemoteRevoke ChannelState for more info.
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			#[allow(unused)]
			fn set(&mut self, flag: Self) { *self |= flag }
			#[allow(unused)]
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		impl core::ops::Not for $flag_type {
			type Output = Self;
			fn not(self) -> Self::Output { Self(!self.0) }
		}
		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		#[allow(unused)]
		fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
		#[allow(unused)]
		fn $set(&mut self) { self.set($flag_type::new() | $flag) }
		#[allow(unused)]
		fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}
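
// Editor's note - a minimal sketch of what an invocation of the macro above
// generates (the names here are hypothetical, for illustration only):
//
//   define_state_flags!("Example flags.", ExampleFlags, [
//       ("Flag A.", FLAG_A, 0b01),
//       ("Flag B.", FLAG_B, 0b10)
//   ]);
//
// expands to a `struct ExampleFlags(u32)` with associated constants `FLAG_A`,
// `FLAG_B`, and `ALL == ExampleFlags(0b11)`, the usual bit operators, and a
// `from_u32` that rejects unknown bits, e.g. `ExampleFlags::from_u32(0b100)`
// returns `Err(())`.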
/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
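
// Worked example (editor's note): a funded, operational channel that is awaiting a
// counterparty `revoke_and_ack` is encoded as
//   CHANNEL_READY | AWAITING_REMOTE_REVOKE = (1 << 6) | (1 << 9) = 64 + 512 = 576,
// which `ChannelState::from_u32` below decodes back into
// `ChannelState::ChannelReady` with the `AWAITING_REMOTE_REVOKE` flag set.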
define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
	]
);
define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
	]
);
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.is_set($state_flag.into()),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags |= $state_flag,
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => *flags &= !($state_flag),
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
		impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
	};
}
impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}
	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
		FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
		FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
		FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
		FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
		AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
		AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
		AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
		ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
}
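
// Usage sketch (editor's note, assuming the helpers generated just above):
//
//   let mut state = ChannelState::ChannelReady(ChannelReadyFlags::new());
//   state.set_peer_disconnected();
//   assert!(state.is_peer_disconnected());
//   // A disconnected peer blocks new commitment generation:
//   assert!(!state.can_generate_new_commitment());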
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;
pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
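
// Worked example (editor's note): a non-anchor commitment transaction carrying
// two non-dust HTLCs weighs
//   commitment_tx_base_weight(..) + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC
//     = 724 + 2 * 172 = 1068 weight units,
// so at a feerate of 2500 sat/kWU the funder pays 1068 * 2500 / 1000 = 2670 sats.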
/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
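
// Editor's note on the 354 value (see the linked bolts issue above for the
// authoritative derivation): Bitcoin Core computes dust for a segwit output as
// roughly 3 sat/vB times (the output's serialized size plus ~67 vbytes to spend
// it). The largest accepted segwit script (42 bytes) gives an output size of
// 8 (value) + 1 (script length) + 42 = 51 bytes, so (51 + 67) * 3 = 354 sats.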
// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}
impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}
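
// Usage sketch (editor's note, hypothetical call site): unwraps a secp256k1
// `Result`, converting any error into a channel-closing `ChannelError::Close`:
//
//   secp_check!(self.secp_ctx.verify_ecdsa(&msg, &their_sig, &their_key),
//       "Invalid counterparty signature".to_owned());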
/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}
/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, either inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
}
/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *non*-included)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
	local_balance_msat: u64, // local balance before fees *not* considering dust limits
	remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
	outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
	inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
}
/// Used when calculating whether we or the remote can afford an additional HTLC.
struct HTLCCandidate {
	amount_msat: u64,
	origin: HTLCInitiator,
}

impl HTLCCandidate {
	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
		Self {
			amount_msat,
			origin,
		}
	}
}
/// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
/// description.
enum UpdateFulfillFetch {
	NewClaim {
		monitor_update: ChannelMonitorUpdate,
		htlc_value_msat: u64,
		msg: Option<msgs::UpdateFulfillHTLC>,
	},
	DuplicateClaim {},
}

/// The return type of get_update_fulfill_htlc_and_commit.
pub enum UpdateFulfillCommitFetch {
	/// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
	/// it in the holding cell, or re-generated the update_fulfill message after the same claim was
	/// previously placed in the holding cell (and has since been removed).
	NewClaim {
		/// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor.
		monitor_update: ChannelMonitorUpdate,
		/// The value of the HTLC which was claimed, in msat.
		htlc_value_msat: u64,
	},
	/// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
	/// or has been forgotten (presumably previously claimed).
	DuplicateClaim {},
}
/// The return value of `monitor_updating_restored`.
pub(super) struct MonitorRestoreUpdates {
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
	pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	pub finalized_claimed_htlcs: Vec<HTLCSource>,
	pub funding_broadcastable: Option<Transaction>,
	pub channel_ready: Option<msgs::ChannelReady>,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
}

/// The return value of `signer_maybe_unblocked`.
pub(super) struct SignerResumeUpdates {
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub funding_signed: Option<msgs::FundingSigned>,
	pub channel_ready: Option<msgs::ChannelReady>,
}

/// The return value of `channel_reestablish`.
pub(super) struct ReestablishResponses {
	pub channel_ready: Option<msgs::ChannelReady>,
	pub raa: Option<msgs::RevokeAndACK>,
	pub commitment_update: Option<msgs::CommitmentUpdate>,
	pub order: RAACommitmentOrder,
	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
	pub shutdown_msg: Option<msgs::Shutdown>,
}
/// The result of a shutdown that should be handled.
pub(crate) struct ShutdownResult {
	pub(crate) closure_reason: ClosureReason,
	/// A channel monitor update to apply.
	pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
	/// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
	pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
	/// An unbroadcasted batch funding transaction id. The closure of this channel should be
	/// propagated to the remainder of the batch.
	pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	pub(crate) channel_id: ChannelId,
	pub(crate) user_channel_id: u128,
	pub(crate) channel_capacity_satoshis: u64,
	pub(crate) counterparty_node_id: PublicKey,
	pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
}
/// If the majority of a channel's funds are to the fundee and the initiator holds only just
/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
/// initiator controls the feerate, if they then go to increase the channel fee, they may have no
/// balance but the fundee is unable to send a payment as the increase in fee more than drains
/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
/// by this multiple without hitting this case, before sending.
/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
/// HTLCs for days we may need this to suffice for feerate increases across days, but that may
/// leave the channel less usable as we hold a bigger reserve.
#[cfg(any(fuzzing, test))]
pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
#[cfg(not(any(fuzzing, test)))]
const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
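
// Worked example (editor's note): with a current feerate of 1000 sat/kWU, an
// initiator sending a new HTLC must be able to afford the resulting commitment
// transaction fee as if the feerate were 2 * 1000 = 2000 sat/kWU, leaving
// headroom for a feerate "jump" of up to 2x before the channel gets stuck.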
/// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
/// channel creation on an inbound channel, we simply force-close and move on.
/// This constant is the one suggested in BOLT 2.
pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;

/// In case of a concurrent update_add_htlc proposed by our counterparty, we might
/// not have enough balance value remaining to cover the onchain cost of this new
/// HTLC weight. If this happens, our counterparty fails the reception of our
/// commitment_signed including this new HTLC due to infringement on the channel
/// reserve.
/// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
/// size 2. However, if the number of concurrent update_add_htlc is higher, this still
/// leads to a channel force-close. Ultimately, this is an issue coming from the
/// design of LN state machines, allowing asynchronous updates.
pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
/// When a channel is opened, we check that the funding amount is enough to pay for relevant
/// commitment transaction fees, with at least this many HTLCs present on the commitment
/// transaction (not counting the value of the HTLCs themselves).
pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;

/// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
/// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
/// ChannelUpdate prompted by the config update. This value was determined as follows:
///
/// * The expected interval between ticks (1 minute).
/// * The average convergence delay of updates across the network, i.e., ~300 seconds on average
///   for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
/// * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
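
// Worked instance (editor's note): ~300 s convergence delay / 60 s per tick = 5 ticks.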
/// The number of ticks that may elapse while we're waiting for a response to a
/// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
/// them.
///
/// See [`ChannelContext::sent_message_awaiting_response`] for more information.
pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;

/// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
/// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
/// exceeding this age limit will be force-closed and purged from memory.
pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;

/// Number of blocks needed for an output from a coinbase transaction to be spendable.
pub(crate) const COINBASE_MATURITY: u32 = 100;

struct PendingChannelMonitorUpdate {
	update: ChannelMonitorUpdate,
}

impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
	(0, update, required),
});
/// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
/// its variants containing an appropriate channel struct.
pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
	UnfundedOutboundV1(OutboundV1Channel<SP>),
	UnfundedInboundV1(InboundV1Channel<SP>),
	Funded(Channel<SP>),
}

impl<'a, SP: Deref> ChannelPhase<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
{
	pub fn context(&'a self) -> &'a ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(chan) => &chan.context,
			ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
			ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
		}
	}

	pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
		match self {
			ChannelPhase::Funded(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
			ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
		}
	}
}
/// Contains all state common to unfunded inbound/outbound channels.
pub(super) struct UnfundedChannelContext {
	/// A counter tracking how many ticks have elapsed since this unfunded channel was
	/// created. If the peer has yet to respond by the time this counter reaches
	/// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
	///
	/// This is so that we don't keep channels around that haven't progressed to a funded state
	/// in a timely manner.
	unfunded_channel_age_ticks: usize,
}

impl UnfundedChannelContext {
	/// Determines whether we should force-close and purge this unfunded channel from memory due to it
	/// having reached the unfunded channel age limit.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
	pub fn should_expire_unfunded_channel(&mut self) -> bool {
		self.unfunded_channel_age_ticks += 1;
		self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
	}
}
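
// Usage sketch (editor's note, hypothetical caller): invoked once per timer tick,
// e.g. from `ChannelManager::timer_tick_occurred`:
//
//   if unfunded_context.should_expire_unfunded_channel() {
//       // force-close the channel and drop it from the per-peer channel map
//   }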
/// Contains everything about the channel including state, and various flags.
pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
	config: LegacyChannelConfig,

	// Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
	// constructed using it. The second element in the tuple corresponds to the number of ticks that
	// have elapsed since the update occurred.
	prev_config: Option<(ChannelConfig, usize)>,

	inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,

	user_id: u128,

	/// The current channel ID.
	channel_id: ChannelId,
	/// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
	/// Will be `None` for channels created prior to 0.0.115.
	temporary_channel_id: Option<ChannelId>,
	channel_state: ChannelState,

	// When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
	// our peer. However, we want to make sure they received it, or else rebroadcast it when we
	// next connect.
	// We do so here, see `AnnouncementSigsSent` for more details on the state(s).
	// Note that a number of our tests were written prior to the behavior here which retransmits
	// AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
	// tests.
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) announcement_sigs_state: AnnouncementSigsState,
	#[cfg(not(any(test, feature = "_test_utils")))]
	announcement_sigs_state: AnnouncementSigsState,

	secp_ctx: Secp256k1<secp256k1::All>,
	channel_value_satoshis: u64,

	latest_monitor_update_id: u64,

	holder_signer: ChannelSignerType<SP>,
	shutdown_scriptpubkey: Option<ShutdownScript>,
	destination_script: ScriptBuf,

	// Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
	// generation start at 0 and count up...this simplifies some parts of implementation at the
	// cost of others, but should really just be changed.

	cur_holder_commitment_transaction_number: u64,
	cur_counterparty_commitment_transaction_number: u64,
	value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
	pending_inbound_htlcs: Vec<InboundHTLCOutput>,
	pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
	holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,

	/// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
	/// need to ensure we resend them in the order we originally generated them. Note that because
	/// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
	/// sufficient to simply set this to the opposite of any message we are generating as we
	/// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
	/// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
	/// send it first.
	resend_order: RAACommitmentOrder,

	monitor_pending_channel_ready: bool,
	monitor_pending_revoke_and_ack: bool,
	monitor_pending_commitment_signed: bool,

	// TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
	// responsible for some of the HTLCs here or not - we don't know whether the update in question
	// completed or not. We currently ignore these fields entirely when force-closing a channel,
	// but need to handle this somehow or we run the risk of losing HTLCs!
	monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
	monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
	monitor_pending_finalized_fulfills: Vec<HTLCSource>,

	/// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
	/// but our signer (initially) refused to give us a signature, we should retry at some point in
	/// the future when the signer indicates it may have a signature for us.
	///
	/// This flag is set in such a case. Note that we don't need to persist this as we'll end up
	/// setting it again as a side-effect of [`Channel::channel_reestablish`].
	signer_pending_commitment_update: bool,
	/// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
	/// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
	/// outbound or inbound.
	signer_pending_funding: bool,

	// pending_update_fee is filled when sending and receiving update_fee.
	//
	// Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
	// or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
	// generating new commitment transactions with exactly the same criteria as inbound/outbound
	// HTLCs with similar state.
	pending_update_fee: Option<(u32, FeeUpdateState)>,
	// If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
	// it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
	// `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
	// `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
	// further `send_update_fee` calls, dropping the previous holding cell update entirely.
	holding_cell_update_fee: Option<u32>,
	next_holder_htlc_id: u64,
	next_counterparty_htlc_id: u64,
	feerate_per_kw: u32,

	/// The timestamp set on our latest `channel_update` message for this channel. It is updated
	/// when the channel is updated in ways which may impact the `channel_update` message or when a
	/// new block is received, ensuring it's always at least moderately close to the current real
	/// time.
	update_time_counter: u32,

	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a locally-generated commitment transaction
	holder_max_commitment_tx_output: Mutex<(u64, u64)>,
	#[cfg(debug_assertions)]
	/// Max to_local and to_remote outputs in a remote-generated commitment transaction
	counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,

	last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
	target_closing_feerate_sats_per_kw: Option<u32>,

	/// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
	/// update, we need to delay processing it until later. We do that here by simply storing the
	/// closing_signed message and handling it in `maybe_propose_closing_signed`.
	pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,

	/// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
	/// transaction. These are set once we reach `closing_negotiation_ready`.
	#[cfg(test)]
	pub(crate) closing_fee_limits: Option<(u64, u64)>,
	#[cfg(not(test))]
	closing_fee_limits: Option<(u64, u64)>,

	/// If we remove an HTLC (or fee update), commit, and receive our counterparty's
	/// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
	/// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
	/// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
	/// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
	///
	/// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
	/// until we see a `commitment_signed` before doing so.
	///
	/// We don't bother to persist this - we anticipate this state won't last longer than a few
	/// milliseconds, so any accidental force-closes here should be exceedingly rare.
	expecting_peer_commitment_signed: bool,

	/// The hash of the block in which the funding transaction was included.
	funding_tx_confirmed_in: Option<BlockHash>,
	funding_tx_confirmation_height: u32,
	short_channel_id: Option<u64>,
	/// Either the height at which this channel was created or the height at which it was last
	/// serialized if it was serialized by versions prior to 0.0.103.
	/// We use this to close if funding is never broadcasted.
	channel_creation_height: u32,

	counterparty_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) holder_dust_limit_satoshis: u64,
	#[cfg(not(test))]
	holder_dust_limit_satoshis: u64,

	#[cfg(test)]
	pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	counterparty_max_htlc_value_in_flight_msat: u64,

	#[cfg(test)]
	pub(super) holder_max_htlc_value_in_flight_msat: u64,
	#[cfg(not(test))]
	holder_max_htlc_value_in_flight_msat: u64,

	/// minimum channel reserve for self to maintain - set by them.
	counterparty_selected_channel_reserve_satoshis: Option<u64>,

	#[cfg(test)]
	pub(super) holder_selected_channel_reserve_satoshis: u64,
	#[cfg(not(test))]
	holder_selected_channel_reserve_satoshis: u64,

	counterparty_htlc_minimum_msat: u64,
	holder_htlc_minimum_msat: u64,
	#[cfg(test)]
	pub counterparty_max_accepted_htlcs: u16,
	#[cfg(not(test))]
	counterparty_max_accepted_htlcs: u16,
	holder_max_accepted_htlcs: u16,
	minimum_depth: Option<u32>,

	counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,

	pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
	funding_transaction: Option<Transaction>,
	is_batch_funding: Option<()>,

	counterparty_cur_commitment_point: Option<PublicKey>,
	counterparty_prev_commitment_point: Option<PublicKey>,
	counterparty_node_id: PublicKey,

	counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,

	commitment_secrets: CounterpartyCommitmentSecrets,

	channel_update_status: ChannelUpdateStatus,
	/// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
	/// not complete within a single timer tick (one minute), we should force-close the channel.
	/// This prevents us from keeping unusable channels around forever if our counterparty wishes
	/// to DoS us.
	/// Note that this field is reset to false on deserialization to give us a chance to connect to
	/// our peer and start the closing_signed negotiation fresh.
	closing_signed_in_flight: bool,

	/// Our counterparty's channel_announcement signatures provided in announcement_signatures.
	/// This can be used to rebroadcast the channel_announcement message later.
	announcement_sigs: Option<(Signature, Signature)>,

	// We save these values so we can make sure `next_local_commit_tx_fee_msat` and
	// `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
	// be, by comparing the cached values to the fee of the transaction generated by
	// `build_commitment_transaction`.
	#[cfg(any(test, fuzzing))]
	next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
	#[cfg(any(test, fuzzing))]
	next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,

	/// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
	/// they will not send a channel_reestablish until the channel locks in. Then, they will send a
	/// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
	/// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
	/// message until we receive a channel_reestablish.
	///
	/// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
	pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,

	/// An option set when we wish to track how many ticks have elapsed while waiting for a response
	/// from our counterparty after sending a message. If the peer has yet to respond after reaching
	/// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
	/// unblock the state machine.
	///
	/// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
	/// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
	/// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
	///
	/// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
	/// [`msgs::RevokeAndACK`] message from the counterparty.
	sent_message_awaiting_response: Option<usize>,

	#[cfg(any(test, fuzzing))]
	// When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
	// corresponding HTLC on the inbound path. If, then, the outbound path channel is
	// disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
	// messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
	// is fine, but as a sanity check in our failure to generate the second claim, we check here
	// that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
	historical_inbound_htlc_fulfills: HashSet<u64>,

	/// This channel's type, as negotiated during channel open.
	channel_type: ChannelTypeFeatures,

	// Our counterparty can offer us SCID aliases which they will map to this channel when routing
	// outbound payments. These can be used in invoice route hints to avoid explicitly revealing
	// the channel's funding UTXO.
	//
	// We also use this when sending our peer a channel_update that isn't to be broadcasted
	// publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
	// associated channel mapping.
	//
	// We only bother storing the most recent SCID alias at any time, though our counterparty has
	// to store all of them.
	latest_inbound_scid_alias: Option<u64>,

	// We always offer our counterparty a static SCID alias, which we recognize as for this channel
	// if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
	// don't currently support node id aliases and eventually privacy should be provided with
	// blinded paths instead of simple scid+node_id aliases.
	outbound_scid_alias: u64,

	// We track whether we already emitted a `ChannelPending` event.
	channel_pending_event_emitted: bool,

	// We track whether we already emitted a `ChannelReady` event.
	channel_ready_event_emitted: bool,

	/// The unique identifier used to re-derive the private key material for the channel through
	/// [`SignerProvider::derive_channel_signer`].
	channel_keys_id: [u8; 32],

	/// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
	/// store it here and only release it to the `ChannelManager` once it asks for it.
	blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
}
impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
	/// Allowed in any state (including after shutdown)
	pub fn get_update_time_counter(&self) -> u32 {
		self.update_time_counter
	}

	pub fn get_latest_monitor_update_id(&self) -> u64 {
		self.latest_monitor_update_id
	}

	pub fn should_announce(&self) -> bool {
		self.config.announced_channel
	}

	pub fn is_outbound(&self) -> bool {
		self.channel_transaction_parameters.is_outbound_from_holder
	}

	/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
	/// Allowed in any state (including after shutdown)
	pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
		self.config.options.forwarding_fee_base_msat
	}

	/// Returns true if we've ever received a message from the remote end for this Channel
	pub fn have_received_message(&self) -> bool {
		self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
	}

	/// Returns true if this channel is fully established and not known to be closing.
	/// Allowed in any state (including after shutdown)
	pub fn is_usable(&self) -> bool {
		matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
			!self.channel_state.is_local_shutdown_sent() &&
			!self.channel_state.is_remote_shutdown_sent() &&
			!self.monitor_pending_channel_ready
	}
1296 /// Returns the state of the channel as it progresses through the various stages of shutdown.
1297 pub fn shutdown_state(&self) -> ChannelShutdownState {
1298 match self.channel_state {
1299 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1300 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1301 ChannelShutdownState::ShutdownInitiated
1302 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1303 ChannelShutdownState::ResolvingHTLCs
1304 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1305 ChannelShutdownState::NegotiatingClosingFee
1306 } else {
1307 ChannelShutdownState::NotShuttingDown
1309 ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1310 _ => ChannelShutdownState::NotShuttingDown,
1314 fn closing_negotiation_ready(&self) -> bool {
1315 let is_ready_to_close = match self.channel_state {
1316 ChannelState::AwaitingChannelReady(flags) =>
1317 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1318 ChannelState::ChannelReady(flags) =>
1319 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1320 _ => false,
1321 };
1322 self.pending_inbound_htlcs.is_empty() &&
1323 self.pending_outbound_htlcs.is_empty() &&
1324 self.pending_update_fee.is_none() &&
1325 is_ready_to_close
1328 /// Returns true if this channel is currently available for use. This applies a superset of
1329 /// the checks in is_usable() and also considers things like the channel being temporarily disabled.
1330 /// Allowed in any state (including after shutdown)
1331 pub fn is_live(&self) -> bool {
1332 self.is_usable() && !self.channel_state.is_peer_disconnected()
1335 // Public utilities:
1337 pub fn channel_id(&self) -> ChannelId {
1338 self.channel_id
1341 // Return the `temporary_channel_id` used during channel establishment.
1343 // Will return `None` for channels created prior to LDK version 0.0.115.
1344 pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1345 self.temporary_channel_id
1348 pub fn minimum_depth(&self) -> Option<u32> {
1349 self.minimum_depth
1352 /// Gets the "user_id" value passed into the construction of this channel. It has no special
1353 /// meaning and exists only to allow users to have a persistent identifier of a channel.
1354 pub fn get_user_id(&self) -> u128 {
1355 self.user_id
1358 /// Gets the channel's type
1359 pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1360 &self.channel_type
1363 /// Gets the channel's `short_channel_id`.
1365 /// Will return `None` if the channel hasn't been confirmed yet.
1366 pub fn get_short_channel_id(&self) -> Option<u64> {
1367 self.short_channel_id
1370 /// Allowed in any state (including after shutdown)
1371 pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1372 self.latest_inbound_scid_alias
1375 /// Allowed in any state (including after shutdown)
1376 pub fn outbound_scid_alias(&self) -> u64 {
1377 self.outbound_scid_alias
1380 /// Returns the holder signer for this channel.
1382 pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1383 return &self.holder_signer
1386 /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1387 /// indicating we were written by LDK prior to 0.0.106, which did not set outbound SCID aliases,
1388 /// or prior to any channel actions during `Channel` initialization.
1389 pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1390 debug_assert_eq!(self.outbound_scid_alias, 0);
1391 self.outbound_scid_alias = outbound_scid_alias;
1394 /// Returns the funding_txo we either got from our peer, or were given by
1395 /// get_funding_created.
1396 pub fn get_funding_txo(&self) -> Option<OutPoint> {
1397 self.channel_transaction_parameters.funding_outpoint
1400 /// Returns the height in which our funding transaction was confirmed.
1401 pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1402 let conf_height = self.funding_tx_confirmation_height;
1403 if conf_height > 0 {
1404 Some(conf_height)
1405 } else {
1406 None
1410 /// Returns the block hash in which our funding transaction was confirmed.
1411 pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1412 self.funding_tx_confirmed_in
1415 /// Returns the current number of confirmations on the funding transaction.
1416 pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1417 if self.funding_tx_confirmation_height == 0 {
1418 // We either haven't seen any confirmation yet, or observed a reorg.
1419 return 0;
1422 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
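// Illustrative sketch, not part of the original file: the funding transaction
// counts as having one confirmation in the very block that contains it, hence
// the `+ 1` above. All values below are hypothetical.
#[cfg(test)]
fn _example_funding_tx_confirmations() {
	let funding_tx_confirmation_height: u32 = 100_000; // block that confirmed the funding tx
	let height: u32 = 100_005; // current chain height
	let confs = height.checked_sub(funding_tx_confirmation_height).map_or(0, |c| c + 1);
	assert_eq!(confs, 6);
}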
1425 fn get_holder_selected_contest_delay(&self) -> u16 {
1426 self.channel_transaction_parameters.holder_selected_contest_delay
1429 fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1430 &self.channel_transaction_parameters.holder_pubkeys
1433 pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1434 self.channel_transaction_parameters.counterparty_parameters
1435 .as_ref().map(|params| params.selected_contest_delay)
1438 fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1439 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1442 /// Allowed in any state (including after shutdown)
1443 pub fn get_counterparty_node_id(&self) -> PublicKey {
1444 self.counterparty_node_id
1447 /// Allowed in any state (including after shutdown)
1448 pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1449 self.holder_htlc_minimum_msat
1452 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1453 pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1454 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1457 /// Allowed in any state (including after shutdown)
1458 pub fn get_announced_htlc_max_msat(&self) -> u64 {
1459 return cmp::min(
1460 // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1461 // to use full capacity. This is an effort to reduce routing failures, because in many cases
1462 // the channel might have been used to route very small values (either by honest users or as DoS).
1463 self.channel_value_satoshis * 1000 * 9 / 10,
1465 self.counterparty_max_htlc_value_in_flight_msat
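// Illustrative sketch, not part of the original file: for a hypothetical
// 1_000_000 sat channel whose counterparty allows 500_000_000 msat in flight,
// the announced htlc_maximum_msat is the smaller of that limit and 90% of
// channel capacity.
#[cfg(test)]
fn _example_announced_htlc_max_msat() {
	let channel_value_satoshis: u64 = 1_000_000;
	let counterparty_max_htlc_value_in_flight_msat: u64 = 500_000_000;
	let announced = cmp::min(
		channel_value_satoshis * 1000 * 9 / 10, // 900_000_000 msat
		counterparty_max_htlc_value_in_flight_msat,
	);
	assert_eq!(announced, 500_000_000); // the in-flight limit binds here
}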
1469 /// Allowed in any state (including after shutdown)
1470 pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1471 self.counterparty_htlc_minimum_msat
1474 /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
1475 pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1476 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1479 fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1480 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1481 let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1482 cmp::min(
1483 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1484 party_max_htlc_value_in_flight_msat
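// Illustrative sketch, not part of the original file: both sides' reserves are
// carved out of the channel capacity before comparing against the in-flight
// limit. All values are hypothetical.
#[cfg(test)]
fn _example_htlc_maximum_msat() {
	let channel_value_satoshis: u64 = 1_000_000;
	let counterparty_reserve: u64 = 10_000;
	let holder_reserve: u64 = 10_000;
	let party_max_htlc_value_in_flight_msat: u64 = 2_000_000_000;
	let htlc_max_msat = cmp::min(
		(channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
		party_max_htlc_value_in_flight_msat,
	);
	assert_eq!(htlc_max_msat, 980_000_000); // capacity net of reserves binds here
}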
1489 pub fn get_value_satoshis(&self) -> u64 {
1490 self.channel_value_satoshis
1493 pub fn get_fee_proportional_millionths(&self) -> u32 {
1494 self.config.options.forwarding_fee_proportional_millionths
1497 pub fn get_cltv_expiry_delta(&self) -> u16 {
1498 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1501 pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1502 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1503 where F::Target: FeeEstimator
1505 match self.config.options.max_dust_htlc_exposure {
1506 MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1507 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1508 ConfirmationTarget::OnChainSweep) as u64;
1509 feerate_per_kw.saturating_mul(multiplier)
1511 MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
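// Illustrative sketch, not part of the original file: with a hypothetical
// estimator feerate of 2_500 sat/kW and a hypothetical multiplier of 10_000,
// the dust exposure limit works out to 25_000_000 msat; a fixed limit is used
// verbatim.
#[cfg(test)]
fn _example_max_dust_htlc_exposure_msat() {
	let feerate_per_kw: u64 = 2_500; // hypothetical estimator output
	let multiplier: u64 = 10_000; // hypothetical FeeRateMultiplier value
	assert_eq!(feerate_per_kw.saturating_mul(multiplier), 25_000_000);
}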
1515 /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1516 pub fn prev_config(&self) -> Option<ChannelConfig> {
1517 self.prev_config.map(|prev_config| prev_config.0)
1520 // Checks whether we should emit a `ChannelPending` event.
1521 pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1522 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1525 // Returns whether we already emitted a `ChannelPending` event.
1526 pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1527 self.channel_pending_event_emitted
1530 // Remembers that we already emitted a `ChannelPending` event.
1531 pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1532 self.channel_pending_event_emitted = true;
1535 // Checks whether we should emit a `ChannelReady` event.
1536 pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1537 self.is_usable() && !self.channel_ready_event_emitted
1540 // Remembers that we already emitted a `ChannelReady` event.
1541 pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1542 self.channel_ready_event_emitted = true;
1545 /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1546 /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1547 /// no longer be considered when forwarding HTLCs.
1548 pub fn maybe_expire_prev_config(&mut self) {
1549 if self.prev_config.is_none() {
1550 return;
1551 }
1552 let prev_config = self.prev_config.as_mut().unwrap();
1553 prev_config.1 += 1;
1554 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1555 self.prev_config = None;
1559 /// Returns the current [`ChannelConfig`] applied to the channel.
1560 pub fn config(&self) -> ChannelConfig {
1561 self.config.options
1564 /// Updates the channel's config. Returns a bool indicating whether the applied config
1565 /// update resulted in a new ChannelUpdate message.
1566 pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1567 let did_channel_update =
1568 self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1569 self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1570 self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1571 if did_channel_update {
1572 self.prev_config = Some((self.config.options, 0));
1573 // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1574 // policy change to propagate throughout the network.
1575 self.update_time_counter += 1;
1576 }
1577 self.config.options = *config;
1578 did_channel_update
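// Illustrative sketch, not part of the original file: the relay-policy fields
// that make `update_config` report that a new `channel_update` is needed,
// expressed as a standalone predicate over two hypothetical configs.
#[cfg(test)]
fn _example_needs_channel_update(old: &ChannelConfig, new: &ChannelConfig) -> bool {
	old.forwarding_fee_proportional_millionths != new.forwarding_fee_proportional_millionths ||
		old.forwarding_fee_base_msat != new.forwarding_fee_base_msat ||
		old.cltv_expiry_delta != new.cltv_expiry_delta
}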
1581 /// Returns true if funding_signed was sent/received and the
1582 /// funding transaction has been broadcast if necessary.
1583 pub fn is_funding_broadcast(&self) -> bool {
1584 !self.channel_state.is_pre_funded_state() &&
1585 !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1588 /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1589 /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1590 /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1591 /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1592 /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1593 /// an HTLC to it).
1594 /// @local is used only to convert relevant internal structures which refer to remote vs local
1595 /// to decide the value of outputs and the direction of HTLCs.
1596 /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1597 /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1598 /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1599 /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1600 /// which peer generated this transaction and "to whom" this transaction flows.
1602 fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1603 where L::Target: Logger
1605 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1606 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1607 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1609 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1610 let mut remote_htlc_total_msat = 0;
1611 let mut local_htlc_total_msat = 0;
1612 let mut value_to_self_msat_offset = 0;
1614 let mut feerate_per_kw = self.feerate_per_kw;
1615 if let Some((feerate, update_state)) = self.pending_update_fee {
1616 if match update_state {
1617 // Note that these match the inclusion criteria when scanning
1618 // pending_inbound_htlcs below.
1619 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1620 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1621 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
1622 } {
1623 feerate_per_kw = feerate;
1627 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1628 commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1629 get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1630 &self.channel_id(),
1631 if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1633 macro_rules! get_htlc_in_commitment {
1634 ($htlc: expr, $offered: expr) => {
1635 HTLCOutputInCommitment {
1636 offered: $offered,
1637 amount_msat: $htlc.amount_msat,
1638 cltv_expiry: $htlc.cltv_expiry,
1639 payment_hash: $htlc.payment_hash,
1640 transaction_output_index: None
1645 macro_rules! add_htlc_output {
1646 ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1647 if $outbound == local { // "offered HTLC output"
1648 let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1649 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1650 0
1651 } else {
1652 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1654 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1655 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1656 included_non_dust_htlcs.push((htlc_in_tx, $source));
1658 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1659 included_dust_htlcs.push((htlc_in_tx, $source));
1662 let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1663 let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1664 0
1665 } else {
1666 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1668 if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1669 log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1670 included_non_dust_htlcs.push((htlc_in_tx, $source));
1672 log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1673 included_dust_htlcs.push((htlc_in_tx, $source));
1679 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1681 for ref htlc in self.pending_inbound_htlcs.iter() {
1682 let (include, state_name) = match htlc.state {
1683 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1684 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1685 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1686 InboundHTLCState::Committed => (true, "Committed"),
1687 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1688 };
1690 if include {
1691 add_htlc_output!(htlc, false, None, state_name);
1692 remote_htlc_total_msat += htlc.amount_msat;
1693 } else {
1694 log_trace!(logger, " ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1695 match &htlc.state {
1696 &InboundHTLCState::LocalRemoved(ref reason) => {
1697 if generated_by_local {
1698 if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1699 inbound_htlc_preimages.push(preimage);
1700 value_to_self_msat_offset += htlc.amount_msat as i64;
1710 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1712 for ref htlc in self.pending_outbound_htlcs.iter() {
1713 let (include, state_name) = match htlc.state {
1714 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1715 OutboundHTLCState::Committed => (true, "Committed"),
1716 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1717 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1718 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1719 };
1721 let preimage_opt = match htlc.state {
1722 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1723 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1724 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1725 _ => None,
1726 };
1728 if let Some(preimage) = preimage_opt {
1729 outbound_htlc_preimages.push(preimage);
1730 }
1732 if include {
1733 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1734 local_htlc_total_msat += htlc.amount_msat;
1735 } else {
1736 log_trace!(logger, " ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1737 match htlc.state {
1738 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1739 value_to_self_msat_offset -= htlc.amount_msat as i64;
1741 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1742 if !generated_by_local {
1743 value_to_self_msat_offset -= htlc.amount_msat as i64;
1751 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1752 assert!(value_to_self_msat >= 0);
1753 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1754 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1755 // "violate" their reserve value by counting those against it. Thus, we have to convert
1756 // everything to i64 before subtracting as otherwise we can overflow.
1757 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1758 assert!(value_to_remote_msat >= 0);
1760 #[cfg(debug_assertions)]
1762 // Make sure that the to_self/to_remote is always either past the appropriate
1763 // channel_reserve *or* it is making progress towards it.
1764 let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1765 self.holder_max_commitment_tx_output.lock().unwrap()
1766 } else {
1767 self.counterparty_max_commitment_tx_output.lock().unwrap()
1768 };
1769 debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1770 broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1771 debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1772 broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1775 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1776 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1777 let (value_to_self, value_to_remote) = if self.is_outbound() {
1778 (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1779 } else {
1780 (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1781 };
1783 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1784 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1785 let (funding_pubkey_a, funding_pubkey_b) = if local {
1786 (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1787 } else {
1788 (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1789 };
1791 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1792 log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1793 } else {
1794 value_to_a = 0;
1795 }
1797 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1798 log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1799 } else {
1800 value_to_b = 0;
1801 }
1803 let num_nondust_htlcs = included_non_dust_htlcs.len();
1805 let channel_parameters =
1806 if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1807 else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1808 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1815 &mut included_non_dust_htlcs,
1818 let mut htlcs_included = included_non_dust_htlcs;
1819 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1820 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1821 htlcs_included.append(&mut included_dust_htlcs);
1829 local_balance_msat: value_to_self_msat as u64,
1830 remote_balance_msat: value_to_remote_msat as u64,
1831 inbound_htlc_preimages,
1832 outbound_htlc_preimages,
1837 /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1838 /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1839 /// our counterparty!)
1840 /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1841 /// TODO Some magic rust shit to compile-time check this?
1842 fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1843 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1844 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1845 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1846 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1848 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1852 /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1853 /// will sign and send to our counterparty.
1854 /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1855 fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1856 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1857 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1858 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1860 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1863 /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1864 /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1865 /// Panics if called before accept_channel/InboundV1Channel::new
1866 pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1867 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1870 fn counterparty_funding_pubkey(&self) -> &PublicKey {
1871 &self.get_counterparty_pubkeys().funding_pubkey
1874 pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1878 pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1879 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1880 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
1881 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1882 // more dust balance if the feerate increases when we have several HTLCs pending
1883 // which are near the dust limit.
1884 let mut feerate_per_kw = self.feerate_per_kw;
1885 // If there's a pending update fee, use it to ensure we aren't under-estimating
1886 // potential feerate updates coming soon.
1887 if let Some((feerate, _)) = self.pending_update_fee {
1888 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1890 if let Some(feerate) = outbound_feerate_update {
1891 feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1893 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
1894 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
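// Illustrative sketch, not part of the original file: the buffered feerate is
// the larger of 2530 sat/kWU and 125% of the prevailing feerate. Values are
// hypothetical.
#[cfg(test)]
fn _example_dust_buffer_feerate() {
	let buffer = |feerate_per_kw: u32| {
		let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
		cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
	};
	assert_eq!(buffer(1_000), 2_530); // the absolute floor dominates at low feerates
	assert_eq!(buffer(20_000), 25_000); // the 25% buffer dominates at high feerates
}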
1897 /// Get forwarding information for the counterparty.
1898 pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1899 self.counterparty_forwarding_info.clone()
1902 /// Returns an HTLCStats about inbound pending htlcs
1903 fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1904 let context = &self;
1905 let mut stats = HTLCStats {
1906 pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1907 pending_htlcs_value_msat: 0,
1908 on_counterparty_tx_dust_exposure_msat: 0,
1909 on_holder_tx_dust_exposure_msat: 0,
1910 holding_cell_msat: 0,
1911 on_holder_tx_holding_cell_htlcs_count: 0,
1914 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1915 (0, 0)
1916 } else {
1917 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1918 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1919 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1921 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1922 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1923 for ref htlc in context.pending_inbound_htlcs.iter() {
1924 stats.pending_htlcs_value_msat += htlc.amount_msat;
1925 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1926 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1928 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1929 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1935 /// Returns an HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1936 fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1937 let context = &self;
1938 let mut stats = HTLCStats {
1939 pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1940 pending_htlcs_value_msat: 0,
1941 on_counterparty_tx_dust_exposure_msat: 0,
1942 on_holder_tx_dust_exposure_msat: 0,
1943 holding_cell_msat: 0,
1944 on_holder_tx_holding_cell_htlcs_count: 0,
1947 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1948 (0, 0)
1949 } else {
1950 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1951 (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1952 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1954 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1955 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1956 for ref htlc in context.pending_outbound_htlcs.iter() {
1957 stats.pending_htlcs_value_msat += htlc.amount_msat;
1958 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1959 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1961 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1962 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1966 for update in context.holding_cell_htlc_updates.iter() {
1967 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1968 stats.pending_htlcs += 1;
1969 stats.pending_htlcs_value_msat += amount_msat;
1970 stats.holding_cell_msat += amount_msat;
1971 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1972 stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1974 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1975 stats.on_holder_tx_dust_exposure_msat += amount_msat;
1977 stats.on_holder_tx_holding_cell_htlcs_count += 1;
1984 /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1985 /// Doesn't bother handling the
1986 /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1987 /// corner case properly.
1988 pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1989 -> AvailableBalances
1990 where F::Target: FeeEstimator
1992 let context = &self;
1993 // Note that we have to handle overflow due to the above case.
1994 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1995 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1997 let mut balance_msat = context.value_to_self_msat;
1998 for ref htlc in context.pending_inbound_htlcs.iter() {
1999 if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2000 balance_msat += htlc.amount_msat;
2003 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2005 let outbound_capacity_msat = context.value_to_self_msat
2006 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2007 .saturating_sub(
2008 context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2010 let mut available_capacity_msat = outbound_capacity_msat;
2012 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2013 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2014 } else {
2015 0
2016 };
2017 if context.is_outbound() {
2018 // We should mind channel commit tx fee when computing how much of the available capacity
2019 // can be used in the next htlc. Mirrors the logic in send_htlc.
2021 // The fee depends on whether the amount we will be sending is above dust or not,
2022 // and the answer will in turn change the amount itself — making it a circular
2023 // dependency.
2024 // This complicates the computation around dust-values, up to the one-htlc-value.
2025 let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2026 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2027 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2030 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2031 let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2032 let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2033 let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2034 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2035 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2036 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2039 // We will first subtract the fee as if we were above-dust. Then, if the resulting
2040 // value ends up being below dust, we have this fee available again. In that case,
2041 // match the value to right-below-dust.
2042 let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2043 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2044 if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2045 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2046 debug_assert!(one_htlc_difference_msat != 0);
2047 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2048 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2049 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2050 } else {
2051 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2052 }
2053 } else {
2054 // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2055 // sending a new HTLC won't reduce their balance below our reserve threshold.
2056 let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2057 if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2058 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2061 let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2062 let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2064 let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2065 let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2066 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2068 if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2069 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2070 // we've selected for them, we can only send dust HTLCs.
2071 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2075 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2077 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2078 // between zero and the remaining dust exposure limit, OR above the dust limit.
2079 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2080 // send above the dust limit (as the router can always overpay to meet the dust limit).
2081 let mut remaining_msat_below_dust_exposure_limit = None;
2082 let mut dust_exposure_dust_limit_msat = 0;
2083 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2085 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2086 (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2087 } else {
2088 let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2089 (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2090 context.holder_dust_limit_satoshis + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2092 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2093 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2094 remaining_msat_below_dust_exposure_limit =
2095 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2096 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2099 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2100 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2101 remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2102 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2103 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2104 dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2107 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2108 if available_capacity_msat < dust_exposure_dust_limit_msat {
2109 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2110 } else {
2111 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2112 }
2113 }
2115 available_capacity_msat = cmp::min(available_capacity_msat,
2116 context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2118 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2119 available_capacity_msat = 0;
2120 }
2122 AvailableBalances {
2123 inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2124 - context.value_to_self_msat as i64
2125 - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2126 - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2127 0) as u64,
2128 outbound_capacity_msat,
2129 next_outbound_htlc_limit_msat: available_capacity_msat,
2130 next_outbound_htlc_minimum_msat,
2131 }
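// Illustrative sketch, not part of the original file: the coarse outbound
// capacity computed above, before the commit-tx-fee and dust-exposure
// adjustments. All values are hypothetical.
#[cfg(test)]
fn _example_outbound_capacity_msat() {
	let value_to_self_msat: u64 = 600_000_000;
	let pending_outbound_htlcs_value_msat: u64 = 50_000_000;
	let counterparty_selected_reserve_satoshis: u64 = 10_000;
	let outbound_capacity_msat = value_to_self_msat
		.saturating_sub(pending_outbound_htlcs_value_msat)
		.saturating_sub(counterparty_selected_reserve_satoshis * 1000);
	assert_eq!(outbound_capacity_msat, 540_000_000);
}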
2135 pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2136 let context = &self;
2137 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2140 /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2141 /// number of pending HTLCs that are on track to be in our next commitment tx.
2143 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2144 /// `fee_spike_buffer_htlc` is `Some`.
2146 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2147 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2149 /// Dust HTLCs are excluded.
2150 fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2151 let context = &self;
2152 assert!(context.is_outbound());
2154 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2155 (0, 0)
2156 } else {
2157 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2158 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2160 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2161 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2163 let mut addl_htlcs = 0;
2164 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2165 match htlc.origin {
2166 HTLCInitiator::LocalOffered => {
2167 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2168 addl_htlcs += 1;
2171 HTLCInitiator::RemoteOffered => {
2172 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2173 addl_htlcs += 1;
2178 let mut included_htlcs = 0;
2179 for ref htlc in context.pending_inbound_htlcs.iter() {
2180 if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2181 continue
2182 }
2183 // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2184 // transaction including this HTLC if it times out before they RAA.
2185 included_htlcs += 1;
2188 for ref htlc in context.pending_outbound_htlcs.iter() {
2189 if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2190 continue
2191 }
2192 match htlc.state {
2193 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2194 OutboundHTLCState::Committed => included_htlcs += 1,
2195 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2196 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2197 // transaction won't be generated until they send us their next RAA, which will mean
2198 // dropping any HTLCs in this state.
2199 _ => {},
2203 for htlc in context.holding_cell_htlc_updates.iter() {
2204 match htlc {
2205 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2206 if amount_msat / 1000 < real_dust_limit_timeout_sat {
2207 continue
2208 }
2209 included_htlcs += 1
2211 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2212 // ack we're guaranteed to never include them in commitment txs anymore.
2216 let num_htlcs = included_htlcs + addl_htlcs;
2217 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2218 #[cfg(any(test, fuzzing))]
2219 {
2220 let mut fee = res;
2221 if fee_spike_buffer_htlc.is_some() {
2222 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2224 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2225 + context.holding_cell_htlc_updates.len();
2226 let commitment_tx_info = CommitmentTxInfoCached {
2227 fee,
2228 total_pending_htlcs,
2229 next_holder_htlc_id: match htlc.origin {
2230 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2231 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2233 next_counterparty_htlc_id: match htlc.origin {
2234 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2235 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2237 feerate: context.feerate_per_kw,
2239 *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2240 }
2241 res
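// Illustrative sketch, not part of the original file: the fee-spike buffer on
// a non-anchor channel reserves the commit-tx fee for one extra non-dust HTLC
// and then doubles it. The weights mirror the non-anchor constants used by
// commit_tx_fee_msat; all other values are hypothetical.
#[cfg(test)]
fn _example_fee_spike_buffer() {
	let commitment_tx_base_weight: u64 = 724;
	let commitment_tx_weight_per_htlc: u64 = 172;
	let feerate_per_kw: u64 = 1_000;
	let num_htlcs: u64 = 3; // two pending HTLCs plus the buffer HTLC
	let fee_msat = (commitment_tx_base_weight + num_htlcs * commitment_tx_weight_per_htlc)
		* feerate_per_kw / 1000 * 1000;
	assert_eq!(fee_msat * 2, 2_480_000); // doubled by the fee spike multiple
}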
2244 /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2245 /// pending HTLCs that are on track to be in their next commitment tx
2247 /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2248 /// `fee_spike_buffer_htlc` is `Some`.
2250 /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2251 /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2253 /// Dust HTLCs are excluded.
2254 fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2255 let context = &self;
2256 assert!(!context.is_outbound());
2258 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2259 (0, 0)
2260 } else {
2261 (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2262 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2264 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2265 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2267 let mut addl_htlcs = 0;
2268 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2269 match htlc.origin {
2270 HTLCInitiator::LocalOffered => {
2271 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2272 addl_htlcs += 1;
2275 HTLCInitiator::RemoteOffered => {
2276 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2277 addl_htlcs += 1;
2282 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2283 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2284 // committed outbound HTLCs are included; see below.
2285 let mut included_htlcs = 0;
2286 for ref htlc in context.pending_inbound_htlcs.iter() {
2287 if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2288 continue
2289 }
2290 included_htlcs += 1;
2293 for ref htlc in context.pending_outbound_htlcs.iter() {
2294 if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2295 continue
2296 }
2297 // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2298 // i.e. if they've responded to us with an RAA after announcement.
2299 match htlc.state {
2300 OutboundHTLCState::Committed => included_htlcs += 1,
2301 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2302 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2303 _ => {},
2307 let num_htlcs = included_htlcs + addl_htlcs;
2308 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2309 #[cfg(any(test, fuzzing))]
2310 {
2311 let mut fee = res;
2312 if fee_spike_buffer_htlc.is_some() {
2313 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2315 let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2316 let commitment_tx_info = CommitmentTxInfoCached {
2317 fee,
2318 total_pending_htlcs,
2319 next_holder_htlc_id: match htlc.origin {
2320 HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2321 HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2323 next_counterparty_htlc_id: match htlc.origin {
2324 HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2325 HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2327 feerate: context.feerate_per_kw,
2329 *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2330 }
2331 res
2334 fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
2335 match self.channel_state {
2336 ChannelState::FundingNegotiated => f(),
2337 ChannelState::AwaitingChannelReady(flags) =>
2338 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
2339 flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
2340 {
2341 f()
2342 } else {
2343 None
2344 },
2345 _ => None,
2349 /// Returns the transaction if there is a pending funding transaction that is yet to be
2350 /// broadcast.
2351 pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2352 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2355 /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2356 /// broadcast.
2357 pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2358 self.if_unbroadcasted_funding(||
2359 self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2363 /// Returns whether the channel is funded in a batch.
2364 pub fn is_batch_funding(&self) -> bool {
2365 self.is_batch_funding.is_some()
2368 /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2369 /// broadcast.
2370 pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2371 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2374 /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2375 /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2376 /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2377 /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
2378 /// immediately (others we will have to allow to time out).
2379 pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
2380 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2381 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2382 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
2383 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2384 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2386 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2387 // return them to fail the payment.
2388 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2389 let counterparty_node_id = self.get_counterparty_node_id();
2390 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2391 match htlc_update {
2392 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2393 dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2398 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2399 // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2400 // returning a channel monitor update here would imply a channel monitor update before
2401 // we even registered the channel monitor to begin with, which is invalid.
2402 // Thus, if we aren't actually at a point where we could conceivably broadcast the
2403 // funding transaction, don't return a funding txo (which prevents providing the
2404 // monitor update to the user, even if we return one).
2405 // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2406 if !self.channel_state.is_pre_funded_state() {
2407 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2408 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2409 update_id: self.latest_monitor_update_id,
2410 counterparty_node_id: Some(self.counterparty_node_id),
2411 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2415 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2416 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
2418 self.channel_state = ChannelState::ShutdownComplete;
2419 self.update_time_counter += 1;
2423 dropped_outbound_htlcs,
2424 unbroadcasted_batch_funding_txid,
2425 channel_id: self.channel_id,
2426 user_channel_id: self.user_id,
2427 channel_capacity_satoshis: self.channel_value_satoshis,
2428 counterparty_node_id: self.counterparty_node_id,
2429 unbroadcasted_funding_tx,
2433 /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2434 fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2435 let counterparty_keys = self.build_remote_transaction_keys();
2436 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2438 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2439 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2440 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2441 &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2443 match &self.holder_signer {
2444 // TODO (arik): move match into calling method for Taproot
2445 ChannelSignerType::Ecdsa(ecdsa) => {
2446 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2447 .map(|(signature, _)| msgs::FundingSigned {
2448 channel_id: self.channel_id(),
2449 signature,
2450 #[cfg(taproot)]
2451 partial_signature_with_nonce: None,
2452 })
2453 .ok();
2455 if funding_signed.is_none() {
2456 #[cfg(not(async_signing))] {
2457 panic!("Failed to get signature for funding_signed");
2459 #[cfg(async_signing)] {
2460 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2461 self.signer_pending_funding = true;
2463 } else if self.signer_pending_funding {
2464 log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2465 self.signer_pending_funding = false;
2468 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2469 (counterparty_initial_commitment_tx, funding_signed)
2471 // TODO (taproot|arik)
2478 // Internal utility functions for channels
2480 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2481 /// `channel_value_satoshis` in msat, set through
2482 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2484 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2486 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2487 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2488 let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2489 1
2490 } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2491 100
2492 } else {
2493 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2494 };
2495 channel_value_satoshis * 10 * configured_percent
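// Illustrative sketch, not part of the original file: a hypothetical
// 1_000_000 sat channel with a configured 10% cap yields 100_000_000 msat,
// since value_sat * 1000 msat/sat * percent / 100 == value_sat * 10 * percent.
#[cfg(test)]
fn _example_max_in_flight_msat() {
	let channel_value_satoshis: u64 = 1_000_000;
	let configured_percent: u64 = 10;
	assert_eq!(channel_value_satoshis * 10 * configured_percent, 100_000_000);
}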
2498 /// Returns a minimum channel reserve value the remote needs to maintain,
2499 /// required by us according to the configured or default
2500 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2502 /// Guaranteed to return a value no larger than channel_value_satoshis
2504 /// This is used both for outbound and inbound channels and has lower bound
2505 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2506 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2507 let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2508 cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
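// Illustrative sketch, not part of the original file: a 1% proportional
// reserve (10_000 millionths) on a hypothetical 1_000_000 sat channel, with a
// 1_000 sat floor standing in for MIN_THEIR_CHAN_RESERVE_SATOSHIS.
#[cfg(test)]
fn _example_holder_selected_reserve() {
	let channel_value_satoshis: u64 = 1_000_000;
	let their_channel_reserve_proportional_millionths: u64 = 10_000; // 1%
	let min_their_chan_reserve_satoshis: u64 = 1_000;
	let calculated = channel_value_satoshis
		.saturating_mul(their_channel_reserve_proportional_millionths) / 1_000_000;
	let reserve = cmp::min(channel_value_satoshis,
		cmp::max(calculated, min_their_chan_reserve_satoshis));
	assert_eq!(reserve, 10_000);
}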
2511 /// This is for legacy reasons, present for forward-compatibility.
2512 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
2513 /// from storage. Hence, we use this function to not persist default values of
2514 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2515 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2516 let (q, _) = channel_value_satoshis.overflowing_div(100);
2517 cmp::min(channel_value_satoshis, cmp::max(q, 1000))
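// Illustrative sketch, not part of the original file: the legacy default is 1%
// of the channel value, clamped between 1_000 sat and the full channel value.
#[cfg(test)]
fn _example_legacy_default_reserve() {
	let legacy = |channel_value_satoshis: u64| {
		let (q, _) = channel_value_satoshis.overflowing_div(100);
		cmp::min(channel_value_satoshis, cmp::max(q, 1000))
	};
	assert_eq!(legacy(1_000_000), 10_000); // 1% dominates
	assert_eq!(legacy(20_000), 1_000); // the 1_000 sat floor dominates
}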
2520 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2521 // Note that num_htlcs should not include dust HTLCs.
2523 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2524 feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2527 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2528 // Note that num_htlcs should not include dust HTLCs.
2529 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2530 // Note that we need to divide before multiplying to round properly,
2531 // since the lowest denomination of bitcoin on-chain is the satoshi.
2532 (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
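// Illustrative sketch, not part of the original file: dividing by 1000 before
// multiplying back truncates the fee to a whole satoshi, so the msat result
// can be slightly below weight * feerate. The weights mirror the non-anchor
// constants; the feerate is hypothetical.
#[cfg(test)]
fn _example_commit_tx_fee_rounding() {
	let weight: u64 = 724 + 2 * 172; // base weight plus two non-dust HTLCs
	let feerate_per_kw: u64 = 253;
	let fee_msat = weight * feerate_per_kw / 1000 * 1000;
	assert_eq!(fee_msat, 270_000); // the exact product would be 270_204 msat
}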
2535 // Holder designates channel data owned for the benefit of the user client.
2536 // Counterparty designates channel data owned by the other channel participant entity.
2537 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2538 pub context: ChannelContext<SP>,
2541 #[cfg(any(test, fuzzing))]
2542 struct CommitmentTxInfoCached {
2543 fee: u64,
2544 total_pending_htlcs: usize,
2545 next_holder_htlc_id: u64,
2546 next_counterparty_htlc_id: u64,
2547 feerate: u32,
/// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
/// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
trait FailHTLCContents {
	type Message: FailHTLCMessageName;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
	fn to_inbound_htlc_state(self) -> InboundHTLCState;
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
}
impl FailHTLCContents for msgs::OnionErrorPacket {
	type Message = msgs::UpdateFailHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
	}
}
impl FailHTLCContents for ([u8; 32], u16) {
	type Message = msgs::UpdateFailMalformedHTLC;
	fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
		msgs::UpdateFailMalformedHTLC {
			htlc_id,
			channel_id,
			sha256_of_onion: self.0,
			failure_code: self.1
		}
	}
	fn to_inbound_htlc_state(self) -> InboundHTLCState {
		InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
	}
	fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
		HTLCUpdateAwaitingACK::FailMalformedHTLC {
			htlc_id,
			sha256_of_onion: self.0,
			failure_code: self.1
		}
	}
}
trait FailHTLCMessageName {
	fn name() -> &'static str;
}
impl FailHTLCMessageName for msgs::UpdateFailHTLC {
	fn name() -> &'static str {
		"update_fail_htlc"
	}
}
impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
	fn name() -> &'static str {
		"update_fail_malformed_htlc"
	}
}
impl<SP: Deref> Channel<SP> where
	SP::Target: SignerProvider,
	<SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
{
	fn check_remote_fee<F: Deref, L: Deref>(
		channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
		feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
	) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
	{
		let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
			ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
		} else {
			ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
		};
		let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
		if feerate_per_kw < lower_limit {
			if let Some(cur_feerate) = cur_feerate_per_kw {
				if feerate_per_kw > cur_feerate {
					log_warn!(logger,
						"Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
						cur_feerate, feerate_per_kw);
					return Ok(());
				}
			}
			return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
		}
		Ok(())
	}
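	// Decision table for check_remote_fee, for illustration: a proposed feerate at or above
	// the estimator's lower bound is always accepted; one below the bound is still accepted
	// when it is an *increase* over the feerate we currently have (refusing it could leave us
	// unable to close the channel), and is otherwise rejected with a channel-closing error.
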
	fn get_closing_scriptpubkey(&self) -> ScriptBuf {
		// The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
		// is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
		// outside of those situations will panic on the `unwrap` below.
		self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
	}
	#[inline]
	fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
		let mut ret =
		(4 +                                                   // version
		 1 +                                                   // input count
		 36 +                                                  // prevout
		 1 +                                                   // script length (0)
		 4 +                                                   // sequence
		 1 +                                                   // output count
		 4                                                     // lock time
		 )*4 +                                                 // * 4 for non-witness parts
		2 +                                                    // witness marker and flag
		1 +                                                    // witness element count
		4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
		self.context.get_funding_redeemscript().len() as u64 + // funding witness script
		2*(1 + 71);                                            // two signatures + sighash type flags
		if let Some(spk) = a_scriptpubkey {
			ret += ((8+1) +                                    // output values and script length
				spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
		}
		if let Some(spk) = b_scriptpubkey {
			ret += ((8+1) +                                    // output values and script length
				spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
		}
		ret
	}
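	// Worked example: for a P2WPKH closing output the scriptpubkey is 22 bytes, so each such
	// output adds ((8 + 1) + 22) * 4 = 124 weight units on top of the fixed transaction
	// skeleton costed above.
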
	fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
		assert!(self.context.pending_inbound_htlcs.is_empty());
		assert!(self.context.pending_outbound_htlcs.is_empty());
		assert!(self.context.pending_update_fee.is_none());

		let mut total_fee_satoshis = proposed_total_fee_satoshis;
		let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
		let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };

		if value_to_holder < 0 {
			assert!(self.context.is_outbound());
			total_fee_satoshis += (-value_to_holder) as u64;
		} else if value_to_counterparty < 0 {
			assert!(!self.context.is_outbound());
			total_fee_satoshis += (-value_to_counterparty) as u64;
		}

		if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_counterparty = 0;
		}

		if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
			value_to_holder = 0;
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let holder_shutdown_script = self.get_closing_scriptpubkey();
		let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
		let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();

		let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
		(closing_transaction, total_fee_satoshis)
	}
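	// For illustration: only the funder's balance pays the closing fee. In a 1_000_000 sat
	// channel where the funding holder is owed 600_000 sats and proposes a 1_000 sat fee, the
	// holder output is 599_000 sats and the counterparty output 400_000 sats; any output at
	// or below holder_dust_limit_satoshis is then pruned entirely.
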
	fn funding_outpoint(&self) -> OutPoint {
		self.context.channel_transaction_parameters.funding_outpoint.unwrap()
	}
	/// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
	/// entirely.
	///
	/// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
	/// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
	///
	/// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
	/// disconnected).
	pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
		(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
	where L::Target: Logger {
		// Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
		// (see equivalent if condition there).
		assert!(!self.context.channel_state.can_generate_new_commitment());
		let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
		let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
		self.context.latest_monitor_update_id = mon_update_id;
		if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
			assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
		}
	}
	fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
		// Either ChannelReady got set (which means it won't be unset) or there is no way any
		// caller thought we could have something claimed (cause we wouldn't have accepted in an
		// incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
		// either.
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		let mut htlc_value_msat = 0;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
				log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
					htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
						}
						return UpdateFulfillFetch::DuplicateClaim {};
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						// Don't return in release mode here so that we can update channel_monitor
					}
				}
				pending_idx = idx;
				htlc_value_msat = htlc.amount_msat;
				break;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
			// this is simply a duplicate claim, not previously failed and we lost funds.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return UpdateFulfillFetch::DuplicateClaim {};
		}

		// Now update local state:
		//
		// We have to put the payment_preimage in the channel_monitor right away here to ensure we
		// can claim it even if the channel hits the chain before we see their next commitment.
		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage: payment_preimage_arg.clone(),
			}],
		};

		if !self.context.channel_state.can_generate_new_commitment() {
			// Note that this condition is the same as the assertion in
			// `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
			// `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
			// do not get into this branch.
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							// Make sure we don't leave latest_monitor_update_id incremented here:
							self.context.latest_monitor_update_id -= 1;
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return UpdateFulfillFetch::DuplicateClaim {};
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
							// TODO: We may actually be able to switch to a fulfill here, though its
							// rare enough it may not be worth the complexity burden.
							debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
							return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
				payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
			});
			#[cfg(any(test, fuzzing))]
			self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
			return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
		}
		#[cfg(any(test, fuzzing))]
		self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);

		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			if let InboundHTLCState::Committed = htlc.state {
			} else {
				debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
				return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
			}
			log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
			htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
		}

		UpdateFulfillFetch::NewClaim {
			monitor_update,
			htlc_value_msat,
			msg: Some(msgs::UpdateFulfillHTLC {
				channel_id: self.context.channel_id(),
				htlc_id: htlc_id_arg,
				payment_preimage: payment_preimage_arg,
			}),
		}
	}
	pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
		let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
		match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
			UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
				// Even if we aren't supposed to let new monitor updates with commitment state
				// updates run, we still need to push the preimage ChannelMonitorUpdateStep no
				// matter what. Sadly, to push a new monitor update which flies before others
				// already queued, we have to insert it into the pending queue and update the
				// update_ids of all the following monitors.
				if release_cs_monitor && msg.is_some() {
					let mut additional_update = self.build_commitment_no_status_check(logger);
					// build_commitment_no_status_check may bump latest_monitor_id but we want them
					// to be strictly increasing by one, so decrement it here.
					self.context.latest_monitor_update_id = monitor_update.update_id;
					monitor_update.updates.append(&mut additional_update.updates);
				} else {
					let new_mon_id = self.context.blocked_monitor_updates.get(0)
						.map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
					monitor_update.update_id = new_mon_id;
					for held_update in self.context.blocked_monitor_updates.iter_mut() {
						held_update.update.update_id += 1;
					}
					if msg.is_some() {
						debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
						let update = self.build_commitment_no_status_check(logger);
						self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
							update,
						});
					}
				}

				self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
				UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
			},
			UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
		}
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
	-> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, err_packet, true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
	/// want to fail blinded HTLCs where we are not the intro node.
	///
	/// See [`Self::queue_fail_htlc`] for more info.
	pub fn queue_fail_malformed_htlc<L: Deref>(
		&mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
	) -> Result<(), ChannelError> where L::Target: Logger {
		self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
	}
	/// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
	/// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
	/// however, fail more than once as we wait for an upstream failure to be irrevocably committed
	/// before we fail backwards.
	///
	/// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
	/// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
	/// [`ChannelError::Ignore`].
	fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
		&mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
		logger: &L
	) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			panic!("Was asked to fail an HTLC when channel was not in an operational state");
		}

		// ChannelManager may generate duplicate claims/fails due to HTLC update events from
		// on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
		// these, but for now we just have to treat them as normal.

		let mut pending_idx = core::usize::MAX;
		for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
			if htlc.htlc_id == htlc_id_arg {
				match htlc.state {
					InboundHTLCState::Committed => {},
					InboundHTLCState::LocalRemoved(ref reason) => {
						if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
						} else {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
						}
						return Ok(None);
					},
					_ => {
						debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
						return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
					}
				}
				pending_idx = idx;
			}
		}
		if pending_idx == core::usize::MAX {
			#[cfg(any(test, fuzzing))]
			// If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
			// is simply a duplicate fail, not previously failed and we failed-back too early.
			debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
			return Ok(None);
		}

		if !self.context.channel_state.can_generate_new_commitment() {
			debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			for pending_update in self.context.holding_cell_htlc_updates.iter() {
				match pending_update {
					&HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
						if htlc_id_arg == htlc_id {
							#[cfg(any(test, fuzzing))]
							debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
							return Ok(None);
						}
					},
					&HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
						&HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
					{
						if htlc_id_arg == htlc_id {
							debug_assert!(false, "Tried to fail an HTLC that was already failed");
							return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
						}
					},
					_ => {}
				}
			}
			log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
			self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
			return Ok(None);
		}

		log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
			E::Message::name(), &self.context.channel_id());
		{
			let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
			htlc.state = err_contents.clone().to_inbound_htlc_state();
		}

		Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
	}
	// Message handlers:
	/// Updates the state of the channel to indicate that all channels in the batch have received
	/// funding_signed and persisted their monitors.
	/// The funding transaction is consequently allowed to be broadcast, and the channel can be
	/// treated as a non-batch channel going forward.
	pub fn set_batch_ready(&mut self) {
		self.context.is_batch_funding = None;
		self.context.channel_state.clear_waiting_for_batch();
	}
	/// Unsets the existing funding information.
	///
	/// This must only be used if the channel has not yet completed funding and has not been used.
	///
	/// Further, the channel must be immediately shut down after this with a call to
	/// [`ChannelContext::force_shutdown`].
	pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
		debug_assert!(matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(_)
		));
		self.context.channel_transaction_parameters.funding_outpoint = None;
		self.context.channel_id = temporary_channel_id;
	}
	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
	/// and the channel is now usable (and public), this may generate an announcement_signatures to
	/// reply with.
	pub fn channel_ready<NS: Deref, L: Deref>(
		&mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block: &BestBlock, logger: &L
	) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.channel_state.is_peer_disconnected() {
			self.context.workaround_lnd_bug_4006 = Some(msg.clone());
			return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
		}

		if let Some(scid_alias) = msg.short_channel_id_alias {
			if Some(scid_alias) != self.context.short_channel_id {
				// The scid alias provided can be used to route payments *from* our counterparty,
				// i.e. can be used for inbound payments and provided in invoices, but is not used
				// when routing outbound payments.
				self.context.latest_inbound_scid_alias = Some(scid_alias);
			}
		}

		// Our channel_ready shouldn't have been sent if we are waiting for other channels in the
		// batch, but we can receive channel_ready messages.
		let mut check_reconnection = false;
		match &self.context.channel_state {
			ChannelState::AwaitingChannelReady(flags) => {
				let flags = flags.clone().clear(FundedStateFlags::ALL.into());
				debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
					// If we reconnected before sending our `channel_ready` they may still resend theirs.
					check_reconnection = true;
				} else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
					self.context.channel_state.set_their_channel_ready();
				} else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
					self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
					self.context.update_time_counter += 1;
				} else {
					// We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
					debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
				}
			},
			// If we reconnected before sending our `channel_ready` they may still resend theirs.
			ChannelState::ChannelReady(_) => check_reconnection = true,
			_ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
		}
		if check_reconnection {
			// They probably disconnected/reconnected and re-sent the channel_ready, which is
			// required, or they're sending a fresh SCID alias.
			let expected_point =
				if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
					// If they haven't ever sent an updated point, the point they send should match
					// the current one.
					self.context.counterparty_cur_commitment_point
				} else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
					// If we've advanced the commitment number once, the second commitment point is
					// at `counterparty_prev_commitment_point`, which is not yet revoked.
					debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
					self.context.counterparty_prev_commitment_point
				} else {
					// If they have sent updated points, channel_ready is always supposed to match
					// their "first" point, which we re-derive here.
					Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
							&self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
						).expect("We already advanced, so previous secret keys should have been validated already")))
				};
			if expected_point != Some(msg.next_per_commitment_point) {
				return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
			}
			return Ok(None);
		}

		self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
		self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);

		log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());

		Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
	}
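	// For intuition on the reconnect check above: commitment numbers count *down* from
	// INITIAL_COMMITMENT_NUMBER, so the three arms correspond to the counterparty having
	// advanced their per-commitment point zero times, once, or more than once. Only in the
	// last case has the secret for their first point been revealed to us, which is why we can
	// re-derive that point from commitment_secrets there and only there.
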
	pub fn update_add_htlc<F, FE: Deref, L: Deref>(
		&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
		create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
	) -> Result<(), ChannelError>
	where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
		FE::Target: FeeEstimator, L::Target: Logger,
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		// We can't accept HTLCs sent after we've sent a shutdown.
		if self.context.channel_state.is_local_shutdown_sent() {
			pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
		}
		// If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
		if self.context.channel_state.is_remote_shutdown_sent() {
			return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
		}
		if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
			return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
		}
		if msg.amount_msat == 0 {
			return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
		}
		if msg.amount_msat < self.context.holder_htlc_minimum_msat {
			return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
		}

		let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
		let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
		if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
			return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
		}
		if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
			return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
		}

		// Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
		// the reserve_satoshis we told them to always have as direct payment so that they lose
		// something if we punish them for broadcasting an old state).
		// Note that we don't really care about having a small/no to_remote output in our local
		// commitment transactions, as the purpose of the channel reserve is to ensure we can
		// punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
		// present in the next commitment transaction we send them (at least for fulfilled ones,
		// failed ones won't modify value_to_self).
		// Note that we will send HTLCs which another instance of rust-lightning would think
		// violate the reserve value if we do not do this (as we forget inbound HTLCs from the
		// Channel state once they will not be present in the next received commitment
		// transaction).
		let mut removed_outbound_total_msat = 0;
		for ref htlc in self.context.pending_outbound_htlcs.iter() {
			if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			} else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
				removed_outbound_total_msat += htlc.amount_msat;
			}
		}

		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
		let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			(0, 0)
		} else {
			let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
			(dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
				dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
		};
		let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
			let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
			if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
					on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
		if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
			let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
			if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
				log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
					on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		}

		let pending_value_to_self_msat =
			self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
		let pending_remote_value_msat =
			self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
		if pending_remote_value_msat < msg.amount_msat {
			return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
		}

		// Check that the remote can afford to pay for this HTLC on-chain at the current
		// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
		{
			let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
				let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
				self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
			};
			let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
			} else {
				0
			};
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
				return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
				return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
			}
		}

		let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
			ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
		} else {
			0
		};
		if !self.context.is_outbound() {
			// `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
			// the spec because the fee spike buffer requirement doesn't exist on the receiver's
			// side, only on the sender's. Note that with anchor outputs we are no longer as
			// sensitive to fee spikes, so the buffer multiple below is only applied on
			// non-anchor channels.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
			if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
			}
			if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
				// Note that if the pending_forward_status is not updated here, then it's because we're already failing
				// the HTLC, i.e. its status is already set to failing.
				log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
				pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
			}
		} else {
			// Check that they won't violate our local required channel reserve by adding this HTLC.
			let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
			let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
			if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
				return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
			}
		}

		if self.context.next_counterparty_htlc_id != msg.htlc_id {
			return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
		}
		if msg.cltv_expiry >= 500000000 {
			return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
		}

		if self.context.channel_state.is_local_shutdown_sent() {
			if let PendingHTLCStatus::Forward(_) = pending_forward_status {
				panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
			}
		}

		// Now update local state:
		self.context.next_counterparty_htlc_id += 1;
		self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
			htlc_id: msg.htlc_id,
			amount_msat: msg.amount_msat,
			payment_hash: msg.payment_hash,
			cltv_expiry: msg.cltv_expiry,
			state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
		});

		Ok(())
	}
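	// Worked example of the fee spike buffer above (illustrative): on a non-anchor channel,
	// if the next remote commitment tx fee for this HTLC would be 5_000 msat, the sender must
	// be able to cover 5_000 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE msat on top of their
	// channel reserve; otherwise we fail the HTLC back with 0x1000|7
	// (temporary_channel_failure) rather than forwarding it.
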
	/// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
	#[inline]
	fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
		assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if htlc.htlc_id == htlc_id {
				let outcome = match check_preimage {
					None => fail_reason.into(),
					Some(payment_preimage) => {
						let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
						if payment_hash != htlc.payment_hash {
							return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
						}
						OutboundHTLCOutcome::Success(Some(payment_preimage))
					}
				};
				match htlc.state {
					OutboundHTLCState::LocalAnnounced(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
					OutboundHTLCState::Committed => {
						htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
					},
					OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
						return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
				}
				return Ok(htlc);
			}
		}
		Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
	}
	pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
	}
	pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
		}

		self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
		Ok(())
	}
	pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
		where L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
			return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
			return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
		}

		let funding_script = self.context.get_funding_redeemscript();

		let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);

		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
		let commitment_txid = {
			let trusted_tx = commitment_stats.tx.trust();
			let bitcoin_tx = trusted_tx.built_transaction();
			let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);

			log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
				log_bytes!(msg.signature.serialize_compact()[..]),
				log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
				log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
			if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
				return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
			}
			bitcoin_tx.txid
		};
		let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();

		// If our counterparty updated the channel fee in this commitment transaction, check that
		// they can actually afford the new fee now.
		let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
			update_state == FeeUpdateState::RemoteAnnounced
		} else { false };
		if update_fee {
			debug_assert!(!self.context.is_outbound());
			let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
			if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
				return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
			}
		}
		#[cfg(any(test, fuzzing))]
		{
			if self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
						+ self.context.holding_cell_htlc_updates.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
					}
				}
			}
		}

		if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
			return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
		}

		// Up to LDK 0.0.115, HTLC information was required to be duplicated in the
		// `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
		// in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
		// outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
		// backwards compatibility, we never use it in production. To provide test coverage, here,
		// we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
		#[allow(unused_assignments, unused_mut)]
		let mut separate_nondust_htlc_sources = false;
		#[cfg(all(feature = "std", any(test, fuzzing)))] {
			use core::hash::{BuildHasher, Hasher};
			// Get a random value using the only std API to do so - the DefaultHasher
			let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
			separate_nondust_htlc_sources = rand_val % 2 == 0;
		}

		let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
		let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
		for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
			if let Some(_) = htlc.transaction_output_index {
				let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
					self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
					&keys.broadcaster_delayed_payment_key, &keys.revocation_key);

				let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
				let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
				let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
				log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
					log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
					encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
				if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
					return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
				}
				if !separate_nondust_htlc_sources {
					htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
				}
			} else {
				htlcs_and_sigs.push((htlc, None, source_opt.take()));
			}
			if separate_nondust_htlc_sources {
				if let Some(source) = source_opt.take() {
					nondust_htlc_sources.push(source);
				}
			}
			debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
		}

		let holder_commitment_tx = HolderCommitmentTransaction::new(
			commitment_stats.tx,
			msg.signature,
			msg.htlc_signatures.clone(),
			&self.context.get_holder_pubkeys().funding_pubkey,
			self.context.counterparty_funding_pubkey()
		);

		self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
			.map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;

		// Update state now that we've passed all the can-fail calls...
		let mut need_commitment = false;
		if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
			if *update_state == FeeUpdateState::RemoteAnnounced {
				*update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
				need_commitment = true;
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
				Some(forward_info.clone())
			} else { None };
			if let Some(forward_info) = new_forward {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
				need_commitment = true;
			}
		}
		let mut claimed_htlcs = Vec::new();
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
					&htlc.payment_hash, &self.context.channel_id);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
					// If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
					// upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
					// have a `Success(None)` reason. In this case we could forget some HTLC
					// claims, but such an upgrade is unlikely and including claimed HTLCs here
					// fixes a bug which the user was exposed to on 0.0.104 when they started the
					// claim anyway.
					claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
				}
				htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
				need_commitment = true;
			}
		}

		self.context.latest_monitor_update_id += 1;
		let mut monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
				commitment_tx: holder_commitment_tx,
				htlc_outputs: htlcs_and_sigs,
				claimed_htlcs,
				nondust_htlc_sources,
			}],
		};

		self.context.cur_holder_commitment_transaction_number -= 1;
		self.context.expecting_peer_commitment_signed = false;
		// Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
		// build_commitment_no_status_check() next which will reset this to RAAFirst.
		self.context.resend_order = RAACommitmentOrder::CommitmentFirst;

		if self.context.channel_state.is_monitor_update_in_progress() {
			// In case we initially failed monitor updating without requiring a response, we need
			// to make sure the RAA gets sent first.
			self.context.monitor_pending_revoke_and_ack = true;
			if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
				// If we were going to send a commitment_signed after the RAA, go ahead and do all
				// the corresponding HTLC status updates so that
				// get_last_commitment_update_for_send includes the right HTLCs.
				self.context.monitor_pending_commitment_signed = true;
				let mut additional_update = self.build_commitment_no_status_check(logger);
				// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
				// strictly increasing by one, so decrement it here.
				self.context.latest_monitor_update_id = monitor_update.update_id;
				monitor_update.updates.append(&mut additional_update.updates);
			}
			log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
				&self.context.channel_id);
			return Ok(self.push_ret_blockable_mon_update(monitor_update));
		}

		let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
			// If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
			// we'll send one right away when we get the revoke_and_ack when we
			// free_holding_cell_htlcs().
			let mut additional_update = self.build_commitment_no_status_check(logger);
			// build_commitment_no_status_check may bump latest_monitor_id but we want them to be
			// strictly increasing by one, so decrement it here.
			self.context.latest_monitor_update_id = monitor_update.update_id;
			monitor_update.updates.append(&mut additional_update.updates);
			true
		} else { false };

		log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
			&self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
		self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
		return Ok(self.push_ret_blockable_mon_update(monitor_update));
	}
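	// Note on monitor update IDs, for illustration: build_commitment_no_status_check bumps
	// latest_monitor_update_id itself, so whenever its updates are folded into an existing
	// ChannelMonitorUpdate above, the counter is first rewound to the outer update's ID to
	// keep the persisted sequence strictly increasing by exactly one per update.
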
	/// Public version of the below, checking relevant preconditions first.
	/// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
	/// returns `(None, Vec::new())`.
	pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
			self.free_holding_cell_htlcs(fee_estimator, logger)
		} else { (None, Vec::new()) }
	}
3588 /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3589 /// for our counterparty.
3590 fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3591 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3592 ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3593 where F::Target: FeeEstimator, L::Target: Logger
3595 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3596 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3597 log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3598 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3600 let mut monitor_update = ChannelMonitorUpdate {
3601 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3602 counterparty_node_id: Some(self.context.counterparty_node_id),
3603 updates: Vec::new(),
3606 let mut htlc_updates = Vec::new();
3607 mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3608 let mut update_add_count = 0;
3609 let mut update_fulfill_count = 0;
3610 let mut update_fail_count = 0;
3611 let mut htlcs_to_fail = Vec::new();
3612 for htlc_update in htlc_updates.drain(..) {
3613 // Note that this *can* fail, though it should be due to rather-rare conditions on
3614 // fee races with adding too many outputs which push our total payments just over
3615 // the limit. In case it's less rare than I anticipate, we may want to revisit
3616 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3617 // to rebalance channels.
3618 let fail_htlc_res = match &htlc_update {
3619 &HTLCUpdateAwaitingACK::AddHTLC {
3620 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3621 skimmed_fee_msat, blinding_point, ..
3623 match self.send_htlc(
3624 amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3625 false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3627 Ok(_) => update_add_count += 1,
3630 ChannelError::Ignore(ref msg) => {
3631 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3632 // If we fail to send here, then this HTLC should
3633 // be failed backwards. Failing to send here
3634 // indicates that this HTLC may keep being put back
3635 // into the holding cell without ever being
3636 // successfully forwarded/failed/fulfilled, causing
3637 // our counterparty to eventually close on us.
3638 htlcs_to_fail.push((source.clone(), *payment_hash));
3641 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3648 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3649 // If an HTLC claim was previously added to the holding cell (via
3650 // `get_update_fulfill_htlc`, then generating the claim message itself must
3651 // not fail - any in between attempts to claim the HTLC will have resulted
3652 // in it hitting the holding cell again and we cannot change the state of a
3653 // holding cell HTLC from fulfill to anything else.
3654 let mut additional_monitor_update =
3655 if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3656 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3657 { monitor_update } else { unreachable!() };
3658 update_fulfill_count += 1;
3659 monitor_update.updates.append(&mut additional_monitor_update.updates);
3662 &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3663 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
3664 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3666 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
3667 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
3668 .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
3671 if let Some(res) = fail_htlc_res {
3673 Ok(fail_msg_opt) => {
3674 // If an HTLC failure was previously added to the holding cell (via
3675 // `queue_fail_{malformed_}htlc`), then generating the fail message itself must
3676 // not fail - we should never end up in a state where we double-fail
3677 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3678 // for a full revocation before failing.
3679 debug_assert!(fail_msg_opt.is_some());
3680 update_fail_count += 1;
3682 Err(ChannelError::Ignore(_)) => {},
3684 panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3689 if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3690 return (None, htlcs_to_fail);
3692 let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3693 self.send_update_fee(feerate, false, fee_estimator, logger)
3698 let mut additional_update = self.build_commitment_no_status_check(logger);
3699 // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_update_id
3700 // but we want update IDs to be strictly increasing by one, so reset it here.
3701 self.context.latest_monitor_update_id = monitor_update.update_id;
3702 monitor_update.updates.append(&mut additional_update.updates);
3704 log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3705 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3706 update_add_count, update_fulfill_count, update_fail_count);
3708 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3709 (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3715 /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3716 /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3717 /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3718 /// generating an appropriate error *after* the channel state has been updated based on the
3719 /// revoke_and_ack message.
3720 pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3721 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3722 ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3723 where F::Target: FeeEstimator, L::Target: Logger,
3725 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3726 return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3728 if self.context.channel_state.is_peer_disconnected() {
3729 return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3731 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3732 return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3735 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3737 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3738 if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3739 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
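// A minimal standalone sketch of the check above, using the secp256k1 types
// re-exported by the `bitcoin` crate (the 32-byte secret is a hypothetical
// placeholder, not a real channel value):
//
//   use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
//
//   let secp = Secp256k1::new();
//   // The secret the peer reveals when revoking a state...
//   let revealed = SecretKey::from_slice(&[0x42; 32]).expect("valid 32-byte secret");
//   // ...must be the private key behind the commitment point they sent earlier.
//   let prev_commitment_point = PublicKey::from_secret_key(&secp, &revealed);
//   assert_eq!(PublicKey::from_secret_key(&secp, &revealed), prev_commitment_point);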
3743 if !self.context.channel_state.is_awaiting_remote_revoke() {
3744 // Our counterparty seems to have burned their coins to us (by revoking a state when we
3745 // haven't given them a new commitment transaction to broadcast). We should probably
3746 // take advantage of this by updating our channel monitor, sending them an error, and
3748 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
3748 // lot of work, and there's some chance this is all a misunderstanding anyway.
3749 // We have to do *something*, though, since our signer may get mad at us for otherwise
3750 // jumping a remote commitment number, so best to just force-close and move on.
3751 return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3754 #[cfg(any(test, fuzzing))]
3756 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3757 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3760 match &self.context.holder_signer {
3761 ChannelSignerType::Ecdsa(ecdsa) => {
3762 ecdsa.validate_counterparty_revocation(
3763 self.context.cur_counterparty_commitment_transaction_number + 1,
3765 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3767 // TODO (taproot|arik)
3772 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3773 .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
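// The provided secret must lie on the BOLT 3 commitment-secret chain, which is
// what lets `CounterpartyCommitmentSecrets` store only O(log n) secrets and
// re-derive the rest. A minimal sketch of that derivation (mirroring the BOLT 3
// scheme; see `chan_utils::build_commitment_secret` for the real implementation):
//
//   use bitcoin::hashes::{sha256, Hash};
//
//   fn build_secret(seed: &[u8; 32], idx: u64) -> [u8; 32] {
//       let mut res = *seed;
//       for i in 0..48 {
//           let bitpos = 47 - i;
//           // For each set bit of the index, from bit 47 down to bit 0,
//           // flip that bit of the running value and hash it.
//           if idx & (1 << bitpos) != 0 {
//               res[bitpos / 8] ^= 1 << (bitpos & 7);
//               res = sha256::Hash::hash(&res).to_byte_array();
//           }
//       }
//       res
//   }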
3774 self.context.latest_monitor_update_id += 1;
3775 let mut monitor_update = ChannelMonitorUpdate {
3776 update_id: self.context.latest_monitor_update_id,
3777 counterparty_node_id: Some(self.context.counterparty_node_id),
3778 updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3779 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3780 secret: msg.per_commitment_secret,
3784 // Update state now that we've passed all the can-fail calls...
3785 // (note that we may still fail to generate the new commitment_signed message, but that's
3787 // OK: we step the channel here and *then*, if the new generation fails, we can fail the
3788 // channel based on that, but stepping the state here should be safe either way).
3788 self.context.channel_state.clear_awaiting_remote_revoke();
3789 self.context.sent_message_awaiting_response = None;
3790 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3791 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3792 self.context.cur_counterparty_commitment_transaction_number -= 1;
3794 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3795 self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3798 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3799 let mut to_forward_infos = Vec::new();
3800 let mut revoked_htlcs = Vec::new();
3801 let mut finalized_claimed_htlcs = Vec::new();
3802 let mut update_fail_htlcs = Vec::new();
3803 let mut update_fail_malformed_htlcs = Vec::new();
3804 let mut require_commitment = false;
3805 let mut value_to_self_msat_diff: i64 = 0;
3808 // Take references explicitly so that we can hold multiple references to self.context.
3809 let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3810 let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3811 let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3813 // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
3814 pending_inbound_htlcs.retain(|htlc| {
3815 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3816 log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3817 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3818 value_to_self_msat_diff += htlc.amount_msat as i64;
3820 *expecting_peer_commitment_signed = true;
3824 pending_outbound_htlcs.retain(|htlc| {
3825 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3826 log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3827 if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3828 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3830 finalized_claimed_htlcs.push(htlc.source.clone());
3831 // They fulfilled, so we sent them money
3832 value_to_self_msat_diff -= htlc.amount_msat as i64;
3837 for htlc in pending_inbound_htlcs.iter_mut() {
3838 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3840 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3844 let mut state = InboundHTLCState::Committed;
3845 mem::swap(&mut state, &mut htlc.state);
3847 if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3848 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3849 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3850 require_commitment = true;
3851 } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3852 match forward_info {
3853 PendingHTLCStatus::Fail(fail_msg) => {
3854 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3855 require_commitment = true;
3857 HTLCFailureMsg::Relay(msg) => {
3858 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3859 update_fail_htlcs.push(msg)
3861 HTLCFailureMsg::Malformed(msg) => {
3862 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3863 update_fail_malformed_htlcs.push(msg)
3867 PendingHTLCStatus::Forward(forward_info) => {
3868 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3869 to_forward_infos.push((forward_info, htlc.htlc_id));
3870 htlc.state = InboundHTLCState::Committed;
3876 for htlc in pending_outbound_htlcs.iter_mut() {
3877 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3878 log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3879 htlc.state = OutboundHTLCState::Committed;
3880 *expecting_peer_commitment_signed = true;
3882 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3883 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3884 // Grab the preimage, if it exists, instead of cloning
3885 let mut reason = OutboundHTLCOutcome::Success(None);
3886 mem::swap(outcome, &mut reason);
3887 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3888 require_commitment = true;
3892 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
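// Worked example of the accounting above (hypothetical amounts): if the revoked
// state removed an inbound HTLC of 10_000 msat that we fulfilled (+10_000) and
// an outbound HTLC of 7_000 msat that our peer fulfilled (-7_000), then
// value_to_self_msat moves by a net +3_000 msat.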
3894 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3895 match update_state {
3896 FeeUpdateState::Outbound => {
3897 debug_assert!(self.context.is_outbound());
3898 log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3899 self.context.feerate_per_kw = feerate;
3900 self.context.pending_update_fee = None;
3901 self.context.expecting_peer_commitment_signed = true;
3903 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3904 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3905 debug_assert!(!self.context.is_outbound());
3906 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3907 require_commitment = true;
3908 self.context.feerate_per_kw = feerate;
3909 self.context.pending_update_fee = None;
3914 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3915 let release_state_str =
3916 if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3917 macro_rules! return_with_htlcs_to_fail {
3918 ($htlcs_to_fail: expr) => {
3919 if !release_monitor {
3920 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3921 update: monitor_update,
3923 return Ok(($htlcs_to_fail, None));
3925 return Ok(($htlcs_to_fail, Some(monitor_update)));
3930 if self.context.channel_state.is_monitor_update_in_progress() {
3931 // We can't actually generate a new commitment transaction (including by freeing holding
3932 // cells) while we can't update the monitor, so we just return what we have.
3933 if require_commitment {
3934 self.context.monitor_pending_commitment_signed = true;
3935 // When the monitor updating is restored we'll call
3936 // get_last_commitment_update_for_send(), which does not update state, but we're
3937 // definitely now awaiting a remote revoke before we can step forward any more, so
3938 // set it here.
3939 let mut additional_update = self.build_commitment_no_status_check(logger);
3940 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update
3941 // IDs to be strictly increasing by one, so reset it here.
3942 self.context.latest_monitor_update_id = monitor_update.update_id;
3943 monitor_update.updates.append(&mut additional_update.updates);
3945 self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3946 self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3947 self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3948 log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3949 return_with_htlcs_to_fail!(Vec::new());
3952 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3953 (Some(mut additional_update), htlcs_to_fail) => {
3954 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want
3955 // update IDs to be strictly increasing by one, so reset it here.
3956 self.context.latest_monitor_update_id = monitor_update.update_id;
3957 monitor_update.updates.append(&mut additional_update.updates);
3959 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3960 &self.context.channel_id(), release_state_str);
3962 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3963 return_with_htlcs_to_fail!(htlcs_to_fail);
3965 (None, htlcs_to_fail) => {
3966 if require_commitment {
3967 let mut additional_update = self.build_commitment_no_status_check(logger);
3969 // build_commitment_no_status_check may bump latest_monitor_update_id but we want update
3970 // IDs to be strictly increasing by one, so reset it here.
3971 self.context.latest_monitor_update_id = monitor_update.update_id;
3972 monitor_update.updates.append(&mut additional_update.updates);
3974 log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3975 &self.context.channel_id(),
3976 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3979 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3980 return_with_htlcs_to_fail!(htlcs_to_fail);
3982 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3983 &self.context.channel_id(), release_state_str);
3985 self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3986 return_with_htlcs_to_fail!(htlcs_to_fail);
3992 /// Queues up an outbound update fee by placing it in the holding cell. You should call
3993 /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3994 /// commitment update.
3995 pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3996 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3997 where F::Target: FeeEstimator, L::Target: Logger
3999 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4000 assert!(msg_opt.is_none(), "We forced holding cell?");
4003 /// Adds a pending update to this channel. See the doc for send_htlc for
4004 /// further details on when the return value may be `None`.
4005 /// If our balance is too low to cover the cost of the next commitment transaction at the
4006 /// new feerate, the update is cancelled.
4008 /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4009 /// [`Channel`] if `force_holding_cell` is false.
4010 fn send_update_fee<F: Deref, L: Deref>(
4011 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4012 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4013 ) -> Option<msgs::UpdateFee>
4014 where F::Target: FeeEstimator, L::Target: Logger
4016 if !self.context.is_outbound() {
4017 panic!("Cannot send fee from inbound channel");
4019 if !self.context.is_usable() {
4020 panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4022 if !self.context.is_live() {
4023 panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4026 // Before proposing a feerate update, check that we can actually afford the new fee.
4027 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4028 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4029 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4030 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4031 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4032 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
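// Rough shape of the buffer-fee math above, for a non-anchor channel (weights
// from BOLT 3; feerates are per 1000 weight units, and the HTLC count and
// feerate here are hypothetical): a commitment transaction weighs roughly
// 724 WU plus 172 WU per non-dust HTLC, so with 5 non-dust HTLCs plus one
// buffered inbound HTLC at 2_500 sat/kW:
//
//   let weight = 724u64 + 172 * 6;                  // 1_756 WU
//   let fee_msat = 2_500u64 * weight / 1000 * 1000; // 4_390 sat, held as 4_390_000 msat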
4033 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4034 //TODO: auto-close after a number of failures?
4035 log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4039 // Note that we evaluate the pending-HTLC "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4040 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4041 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
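// Sketch of the trimmed-to-dust threshold these exposure sums are built from
// (non-anchor weights from BOLT 3; the dust limit and feerate are hypothetical;
// `htlc_success_tx_weight`/`htlc_timeout_tx_weight` give the real weights): a
// received HTLC on our commitment transaction is dust when
//
//   // amount_sat < dust_limit_sat + htlc_success_tx_weight * feerate_per_kw / 1000
//   let threshold_sat = 546 + 703 * 5_000 / 1000; // = 4_061 sat at 5_000 sat/kW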
4042 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4043 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4044 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4047 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4048 log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4052 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4053 force_holding_cell = true;
4056 if force_holding_cell {
4057 self.context.holding_cell_update_fee = Some(feerate_per_kw);
4061 debug_assert!(self.context.pending_update_fee.is_none());
4062 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4064 Some(msgs::UpdateFee {
4065 channel_id: self.context.channel_id,
4066 feerate_per_kw,
4070 /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4071 /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4072 /// resent.
4073 /// No further message handling calls may be made until a channel_reestablish dance has
4074 /// completed.
4075 /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4076 pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4077 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4078 if self.context.channel_state.is_pre_funded_state() {
4082 if self.context.channel_state.is_peer_disconnected() {
4083 // While the below code should be idempotent, it's simpler to just return early, as
4084 // redundant disconnect events can fire, though they should be rare.
4088 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4089 self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4092 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4093 // will be retransmitted.
4094 self.context.last_sent_closing_fee = None;
4095 self.context.pending_counterparty_closing_signed = None;
4096 self.context.closing_fee_limits = None;
4098 let mut inbound_drop_count = 0;
4099 self.context.pending_inbound_htlcs.retain(|htlc| {
4101 InboundHTLCState::RemoteAnnounced(_) => {
4102 // They sent us an update_add_htlc but we never got the commitment_signed.
4103 // We'll tell them what commitment_signed we're expecting next and they'll drop
4104 // this HTLC accordingly
4105 inbound_drop_count += 1;
4108 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4109 // We received a commitment_signed updating this HTLC and (at least hopefully)
4110 // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4111 // in response to it yet, so don't touch it.
4114 InboundHTLCState::Committed => true,
4115 InboundHTLCState::LocalRemoved(_) => {
4116 // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4117 // re-transmit if needed) and they may have even sent a revoke_and_ack back
4118 // (that we missed). Keep this around for now and if they tell us they missed
4119 // the commitment_signed we can re-transmit the update then.
4124 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4126 if let Some((_, update_state)) = self.context.pending_update_fee {
4127 if update_state == FeeUpdateState::RemoteAnnounced {
4128 debug_assert!(!self.context.is_outbound());
4129 self.context.pending_update_fee = None;
4133 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4134 if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4135 // They sent us an update to remove this but haven't yet sent the corresponding
4136 // commitment_signed, we need to move it back to Committed and they can re-send
4137 // the update upon reconnection.
4138 htlc.state = OutboundHTLCState::Committed;
4142 self.context.sent_message_awaiting_response = None;
4144 self.context.channel_state.set_peer_disconnected();
4145 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4149 /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4150 /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4151 /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4152 /// update completes (potentially immediately).
4153 /// The messages which were generated with the monitor update must *not* have been sent to the
4154 /// remote end, and must instead have been dropped. They will be regenerated when
4155 /// [`Self::monitor_updating_restored`] is called.
4157 /// [`ChannelManager`]: super::channelmanager::ChannelManager
4158 /// [`chain::Watch`]: crate::chain::Watch
4159 /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4160 fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4161 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4162 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4163 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4165 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4166 self.context.monitor_pending_commitment_signed |= resend_commitment;
4167 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4168 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4169 self.context.monitor_pending_failures.append(&mut pending_fails);
4170 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4171 self.context.channel_state.set_monitor_update_in_progress();
4174 /// Indicates that the latest ChannelMonitor update has been committed by the client
4175 /// successfully and we should restore normal operation. Returns messages which should be sent
4176 /// to the remote side.
4177 pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4178 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4179 user_config: &UserConfig, best_block_height: u32
4180 ) -> MonitorRestoreUpdates
4183 NS::Target: NodeSigner
4185 assert!(self.context.channel_state.is_monitor_update_in_progress());
4186 self.context.channel_state.clear_monitor_update_in_progress();
4188 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4189 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4190 // first received the funding_signed.
4191 let mut funding_broadcastable =
4192 if self.context.is_outbound() &&
4193 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4194 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
4196 self.context.funding_transaction.take()
4198 // That said, if the funding transaction is already confirmed (ie we're active with a
4199 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4200 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4201 funding_broadcastable = None;
4204 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4205 // (and we assume the user never directly broadcasts the funding transaction and waits for
4206 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4207 // * an inbound channel that failed to persist the monitor on funding_created and we got
4208 // the funding transaction confirmed before the monitor was persisted, or
4209 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
4210 let channel_ready = if self.context.monitor_pending_channel_ready {
4211 assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4212 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4213 self.context.monitor_pending_channel_ready = false;
4214 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4215 Some(msgs::ChannelReady {
4216 channel_id: self.context.channel_id(),
4217 next_per_commitment_point,
4218 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4222 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4224 let mut accepted_htlcs = Vec::new();
4225 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4226 let mut failed_htlcs = Vec::new();
4227 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4228 let mut finalized_claimed_htlcs = Vec::new();
4229 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4231 if self.context.channel_state.is_peer_disconnected() {
4232 self.context.monitor_pending_revoke_and_ack = false;
4233 self.context.monitor_pending_commitment_signed = false;
4234 return MonitorRestoreUpdates {
4235 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4236 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4240 let raa = if self.context.monitor_pending_revoke_and_ack {
4241 Some(self.get_last_revoke_and_ack())
4243 let commitment_update = if self.context.monitor_pending_commitment_signed {
4244 self.get_last_commitment_update_for_send(logger).ok()
4246 if commitment_update.is_some() {
4247 self.mark_awaiting_response();
4250 self.context.monitor_pending_revoke_and_ack = false;
4251 self.context.monitor_pending_commitment_signed = false;
4252 let order = self.context.resend_order.clone();
4253 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4254 &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4255 if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4256 match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4257 MonitorRestoreUpdates {
4258 raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4262 pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4263 where F::Target: FeeEstimator, L::Target: Logger
4265 if self.context.is_outbound() {
4266 return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4268 if self.context.channel_state.is_peer_disconnected() {
4269 return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4271 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4273 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4274 self.context.update_time_counter += 1;
4275 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4276 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4277 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4278 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4279 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4280 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4281 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4282 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4283 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4284 msg.feerate_per_kw, holder_tx_dust_exposure)));
4286 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4287 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4288 msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4294 /// Indicates that the signer may have some signatures for us, so we should retry if we're
4295 /// blocked.
4296 #[cfg(async_signing)]
4297 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4298 let commitment_update = if self.context.signer_pending_commitment_update {
4299 self.get_last_commitment_update_for_send(logger).ok()
4301 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4302 self.context.get_funding_signed_msg(logger).1
4304 let channel_ready = if funding_signed.is_some() {
4305 self.check_get_channel_ready(0)
4308 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4309 if commitment_update.is_some() { "a" } else { "no" },
4310 if funding_signed.is_some() { "a" } else { "no" },
4311 if channel_ready.is_some() { "a" } else { "no" });
4313 SignerResumeUpdates {
4320 fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4321 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4322 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
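// Indexing note: commitment transaction numbers count down from
// INITIAL_COMMITMENT_NUMBER (2^48 - 1), so `cur + 1` is the current, live state
// and `cur + 2` is the state our last revoke_and_ack revoked. Concretely
// (hypothetical): with cur = INITIAL_COMMITMENT_NUMBER - 5 we re-release the
// secret for countdown index INITIAL_COMMITMENT_NUMBER - 3.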
4323 msgs::RevokeAndACK {
4324 channel_id: self.context.channel_id,
4325 per_commitment_secret,
4326 next_per_commitment_point,
4328 next_local_nonce: None,
4332 /// Gets the last commitment update for immediate sending to our peer.
4333 fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4334 let mut update_add_htlcs = Vec::new();
4335 let mut update_fulfill_htlcs = Vec::new();
4336 let mut update_fail_htlcs = Vec::new();
4337 let mut update_fail_malformed_htlcs = Vec::new();
4339 for htlc in self.context.pending_outbound_htlcs.iter() {
4340 if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4341 update_add_htlcs.push(msgs::UpdateAddHTLC {
4342 channel_id: self.context.channel_id(),
4343 htlc_id: htlc.htlc_id,
4344 amount_msat: htlc.amount_msat,
4345 payment_hash: htlc.payment_hash,
4346 cltv_expiry: htlc.cltv_expiry,
4347 onion_routing_packet: (**onion_packet).clone(),
4348 skimmed_fee_msat: htlc.skimmed_fee_msat,
4349 blinding_point: htlc.blinding_point,
4354 for htlc in self.context.pending_inbound_htlcs.iter() {
4355 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4357 &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4358 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4359 channel_id: self.context.channel_id(),
4360 htlc_id: htlc.htlc_id,
4361 reason: err_packet.clone()
4364 &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4365 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4366 channel_id: self.context.channel_id(),
4367 htlc_id: htlc.htlc_id,
4368 sha256_of_onion: sha256_of_onion.clone(),
4369 failure_code: failure_code.clone(),
4372 &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4373 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4374 channel_id: self.context.channel_id(),
4375 htlc_id: htlc.htlc_id,
4376 payment_preimage: payment_preimage.clone(),
4383 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4384 Some(msgs::UpdateFee {
4385 channel_id: self.context.channel_id(),
4386 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4390 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4391 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4392 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4393 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4394 if self.context.signer_pending_commitment_update {
4395 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4396 self.context.signer_pending_commitment_update = false;
4400 #[cfg(not(async_signing))] {
4401 panic!("Failed to get signature for new commitment state");
4403 #[cfg(async_signing)] {
4404 if !self.context.signer_pending_commitment_update {
4405 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4406 self.context.signer_pending_commitment_update = true;
4411 Ok(msgs::CommitmentUpdate {
4412 update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4417 /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4418 pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4419 if self.context.channel_state.is_local_shutdown_sent() {
4420 assert!(self.context.shutdown_scriptpubkey.is_some());
4421 Some(msgs::Shutdown {
4422 channel_id: self.context.channel_id,
4423 scriptpubkey: self.get_closing_scriptpubkey(),
4428 /// May panic if some calls other than message-handling calls (which will all Err immediately)
4429 /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4431 /// Some links printed in log lines are included here to check them during build (when run with
4432 /// `cargo doc --document-private-items`):
4433 /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4434 /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4435 pub fn channel_reestablish<L: Deref, NS: Deref>(
4436 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4437 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4438 ) -> Result<ReestablishResponses, ChannelError>
4441 NS::Target: NodeSigner
4443 if !self.context.channel_state.is_peer_disconnected() {
4444 // While BOLT 2 doesn't explicitly indicate that we should error this channel here, it
4445 // almost certainly indicates we are going to end up out-of-sync in some way, so we
4446 // just close here instead of trying to recover.
4447 return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4450 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4451 msg.next_local_commitment_number == 0 {
4452 return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4455 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
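// Mapping sketch between the internal countdown index and the protocol's
// counting-up commitment numbers (values hypothetical): if
// cur_holder_commitment_transaction_number = INITIAL_COMMITMENT_NUMBER - k,
// then our_commitment_transaction = k - 1, which is what the peer's
// next_remote_commitment_number is measured against below.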
4456 if msg.next_remote_commitment_number > 0 {
4457 let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4458 let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4459 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4460 if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4461 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4463 if msg.next_remote_commitment_number > our_commitment_transaction {
4464 macro_rules! log_and_panic {
4465 ($err_msg: expr) => {
4466 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4467 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4470 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4471 This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4472 More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4473 If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4474 ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4475 ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4476 Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4477 See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4481 // Before we change the state of the channel, we check if the peer is sending a very old
4482 // commitment transaction number; if so, we send a warning message.
4483 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4484 return Err(ChannelError::Warn(format!(
4485 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4486 msg.next_remote_commitment_number,
4487 our_commitment_transaction
4491 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4492 // remaining cases either succeed or ErrorMessage-fail).
4493 self.context.channel_state.clear_peer_disconnected();
4494 self.context.sent_message_awaiting_response = None;
4496 let shutdown_msg = self.get_outbound_shutdown();
4498 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4500 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4501 // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4502 if !self.context.channel_state.is_our_channel_ready() ||
4503 self.context.channel_state.is_monitor_update_in_progress() {
4504 if msg.next_remote_commitment_number != 0 {
4505 return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4507 // Short circuit the whole handler as there is nothing we can resend them
4508 return Ok(ReestablishResponses {
4509 channel_ready: None,
4510 raa: None, commitment_update: None,
4511 order: RAACommitmentOrder::CommitmentFirst,
4512 shutdown_msg, announcement_sigs,
4516 // We have OurChannelReady set!
4517 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4518 return Ok(ReestablishResponses {
4519 channel_ready: Some(msgs::ChannelReady {
4520 channel_id: self.context.channel_id(),
4521 next_per_commitment_point,
4522 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4524 raa: None, commitment_update: None,
4525 order: RAACommitmentOrder::CommitmentFirst,
4526 shutdown_msg, announcement_sigs,
4530 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4531 // Remote isn't waiting on any RevokeAndACK from us!
4532 // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4534 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4535 if self.context.channel_state.is_monitor_update_in_progress() {
4536 self.context.monitor_pending_revoke_and_ack = true;
4539 Some(self.get_last_revoke_and_ack())
4542 debug_assert!(false, "All values should have been handled in the four cases above");
4543 return Err(ChannelError::Close(format!(
4544 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4545 msg.next_remote_commitment_number,
4546 our_commitment_transaction
4550 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4551 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
4552 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4553 // the corresponding revoke_and_ack back yet.
4554 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4555 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4556 self.mark_awaiting_response();
4558 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
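// E.g. (hypothetical): if cur_counterparty_commitment_transaction_number =
// INITIAL_COMMITMENT_NUMBER - k, the peer's next commitment number is k, or
// k + 1 while our commitment_signed is in flight awaiting its revoke_and_ack.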
4560 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4561 // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4562 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4563 Some(msgs::ChannelReady {
4564 channel_id: self.context.channel_id(),
4565 next_per_commitment_point,
4566 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4570 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4571 if required_revoke.is_some() {
4572 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4574 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4577 Ok(ReestablishResponses {
4578 channel_ready, shutdown_msg, announcement_sigs,
4579 raa: required_revoke,
4580 commitment_update: None,
4581 order: self.context.resend_order.clone(),
4583 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4584 if required_revoke.is_some() {
4585 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4587 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4590 if self.context.channel_state.is_monitor_update_in_progress() {
4591 self.context.monitor_pending_commitment_signed = true;
4592 Ok(ReestablishResponses {
4593 channel_ready, shutdown_msg, announcement_sigs,
4594 commitment_update: None, raa: None,
4595 order: self.context.resend_order.clone(),
4598 Ok(ReestablishResponses {
4599 channel_ready, shutdown_msg, announcement_sigs,
4600 raa: required_revoke,
4601 commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4602 order: self.context.resend_order.clone(),
4605 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4606 Err(ChannelError::Close(format!(
4607 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4608 msg.next_local_commitment_number,
4609 next_counterparty_commitment_number,
4612 Err(ChannelError::Close(format!(
4613 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4614 msg.next_local_commitment_number,
4615 next_counterparty_commitment_number,
4620 /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4621 /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4622 /// at which point they will be recalculated.
4623 fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4625 where F::Target: FeeEstimator
4627 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4629 // Propose a range from our current ChannelCloseMinimum feerate to our NonAnchorChannelFee
4630 // feerate plus our force_close_avoidance_max_fee_satoshis.
4631 // If we fail to come to consensus, we'll have to force-close.
4632 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4633 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4634 // that we don't expect to need fee bumping.
4635 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4636 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4638 // The spec requires that (when the channel does not have anchors) we only send absolute
4639 // channel fees no greater than the absolute channel fee on the current commitment
4640 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4641 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
4642 // some force-closure by old nodes, but we wanted to close the channel anyway.
4644 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4645 let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4646 proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4647 proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4650 // Note that technically we could end up with a lower minimum fee if one side's balance is
4651 // below our dust limit, causing the output to disappear. We don't bother handling this
4652 // case, however, as this should only happen if a channel is closed before any (material)
4653 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4654 // come to consensus with our counterparty on appropriate fees, however it should be a
4655 // relatively rare case. We can revisit this later, though note that in order to determine
4656 // if the funder's output is dust we have to know the absolute fee we're going to use.
4657 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4658 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4659 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4660 // We always add force_close_avoidance_max_fee_satoshis to our normal
4661 // feerate-calculated fee, but allow the max to be overridden if we're using a
4662 // target feerate-calculated fee.
4663 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4664 proposed_max_feerate as u64 * tx_weight / 1000)
4666 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4669 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4670 self.context.closing_fee_limits.clone().unwrap()
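// Worked example of the fee-range math above (hypothetical numbers): for a
// closing transaction of 700 WU, a ChannelCloseMinimum feerate of 1_000 sat/kW,
// a NonAnchorChannelFee feerate of 5_000 sat/kW, and the default
// force_close_avoidance_max_fee_satoshis of 1_000 sat, as the funder we'd offer:
//
//   let min_fee_sat = 1_000u64 * 700 / 1000;          // 700 sat
//   let max_fee_sat = 5_000u64 * 700 / 1000 + 1_000;  // 4_500 sat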
4673 /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4674 /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4675 /// this point if we're the funder we should send the initial closing_signed, and in any case
4676 /// shutdown should complete within a reasonable timeframe.
4677 fn closing_negotiation_ready(&self) -> bool {
4678 self.context.closing_negotiation_ready()
4681 /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4682 /// an Err if no progress is being made and the channel should be force-closed instead.
4683 /// Should be called on a one-minute timer.
4684 pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4685 if self.closing_negotiation_ready() {
4686 if self.context.closing_signed_in_flight {
4687 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4689 self.context.closing_signed_in_flight = true;
4695 pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4696 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4697 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4698 where F::Target: FeeEstimator, L::Target: Logger
4700 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4701 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4702 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4703 // that closing_negotiation_ready checks this case (as well as a few others).
4704 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4705 return Ok((None, None, None));
4708 if !self.context.is_outbound() {
4709 if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4710 return self.closing_signed(fee_estimator, &msg);
4712 return Ok((None, None, None));
4715 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4716 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4717 if self.context.expecting_peer_commitment_signed {
4718 return Ok((None, None, None));
4721 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4723 assert!(self.context.shutdown_scriptpubkey.is_some());
4724 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4725 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4726 our_min_fee, our_max_fee, total_fee_satoshis);
4728 match &self.context.holder_signer {
4729 ChannelSignerType::Ecdsa(ecdsa) => {
4731 .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4732 .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4734 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4735 Ok((Some(msgs::ClosingSigned {
4736 channel_id: self.context.channel_id,
4737 fee_satoshis: total_fee_satoshis,
4739 fee_range: Some(msgs::ClosingSignedFeeRange {
4740 min_fee_satoshis: our_min_fee,
4741 max_fee_satoshis: our_max_fee,
4745 // TODO (taproot|arik)
4751 // Marks a channel as waiting for a response from the counterparty. If one is not received
4752 // within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own message to them,
4753 // we'll attempt a reconnection.
4754 fn mark_awaiting_response(&mut self) {
4755 self.context.sent_message_awaiting_response = Some(0);
4758 /// Determines whether we should disconnect the counterparty due to not receiving a response
4759 /// within our expected timeframe.
4761 /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4762 pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4763 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4766 // Don't disconnect when we're not waiting on a response.
4769 *ticks_elapsed += 1;
4770 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
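// E.g. if DISCONNECT_PEER_AWAITING_RESPONSE_TICKS were 2, then with the
// recommended once-per-minute timer a peer that stays silent after our message
// triggers a disconnect on the second timer tick following mark_awaiting_response().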
4774 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4775 ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4777 if self.context.channel_state.is_peer_disconnected() {
4778 return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4780 if self.context.channel_state.is_pre_funded_state() {
4781 // Spec says we should fail the connection, not the channel, but that's nonsense; there
4782 // are plenty of reasons you may want to fail a channel pre-funding, and the spec says you
4783 // can do that via an error message without getting a connection failure anyway...
4784 return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4786 for htlc in self.context.pending_inbound_htlcs.iter() {
4787 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4788 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4791 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4793 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4794 return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4797 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4798 if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4799 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4802 self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4805 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4806 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4807 // any further commitment updates after we set LocalShutdownSent.
4808 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4810 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4813 assert!(send_shutdown);
4814 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4815 Ok(scriptpubkey) => scriptpubkey,
4816 Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4818 if !shutdown_scriptpubkey.is_compatible(their_features) {
4819 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4821 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4826 // From here on out, we may not fail!
4828 self.context.channel_state.set_remote_shutdown_sent();
4829 self.context.update_time_counter += 1;
4831 let monitor_update = if update_shutdown_script {
4832 self.context.latest_monitor_update_id += 1;
4833 let monitor_update = ChannelMonitorUpdate {
4834 update_id: self.context.latest_monitor_update_id,
4835 counterparty_node_id: Some(self.context.counterparty_node_id),
4836 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4837 scriptpubkey: self.get_closing_scriptpubkey(),
4840 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4841 self.push_ret_blockable_mon_update(monitor_update)
4843 let shutdown = if send_shutdown {
4844 Some(msgs::Shutdown {
4845 channel_id: self.context.channel_id,
4846 scriptpubkey: self.get_closing_scriptpubkey(),
4850 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4851 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4852 // cell HTLCs and return them to fail the payment.
4853 self.context.holding_cell_update_fee = None;
4854 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4855 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4857 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4858 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4865 self.context.channel_state.set_local_shutdown_sent();
4866 self.context.update_time_counter += 1;
4868 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
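
	// Per BOLT 2, the `script::is_bolt2_compliant` check above accepts the standard closing
	// script forms (P2PKH, P2SH, P2WPKH, P2WSH, and, when the peer signals
	// `option_shutdown_anysegwit`, any later witness program of valid length), e.g. a P2WPKH
	// script: OP_0 <20-byte key hash>.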
	fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
		let mut tx = closing_tx.trust().built_transaction().clone();

		tx.input[0].witness.push(Vec::new()); // First is the multisig dummy

		let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
		let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
		let mut holder_sig = sig.serialize_der().to_vec();
		holder_sig.push(EcdsaSighashType::All as u8);
		let mut cp_sig = counterparty_sig.serialize_der().to_vec();
		cp_sig.push(EcdsaSighashType::All as u8);
		if funding_key[..] < counterparty_funding_key[..] {
			tx.input[0].witness.push(holder_sig);
			tx.input[0].witness.push(cp_sig);
		} else {
			tx.input[0].witness.push(cp_sig);
			tx.input[0].witness.push(holder_sig);
		}

		tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
		tx
	}
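
	// The signature ordering above matches the funding redeemscript, which (per BOLT 3, see
	// `make_funding_redeemscript`) places the lexicographically-smaller serialized funding
	// pubkey first. The final witness stack for the 2-of-2 spend is therefore:
	//   [ <> <sig for smaller key> <sig for larger key> <2 <key1> <key2> 2 OP_CHECKMULTISIG> ]
	// where the leading empty element is the OP_CHECKMULTISIG off-by-one dummy.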
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
	{
		if !self.context.channel_state.is_both_sides_shutdown() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
		}
		if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
			return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
		}
		if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
			return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
		}

		if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
			return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
		}

		if self.context.channel_state.is_monitor_update_in_progress() {
			self.context.pending_counterparty_closing_signed = Some(msg.clone());
			return Ok((None, None, None));
		}

		let funding_redeemscript = self.context.get_funding_redeemscript();
		let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
		if used_total_fee != msg.fee_satoshis {
			return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
		}
		let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);

		match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
			Ok(_) => {},
			Err(_e) => {
				// The remote end may have decided to revoke their output due to inconsistent dust
				// limits, so check for that case by re-checking the signature here.
				closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
				let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
				secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
			},
		};

		for outp in closing_tx.trust().built_transaction().output.iter() {
			if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
				return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
			}
		}

		assert!(self.context.shutdown_scriptpubkey.is_some());
		if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
			if last_fee == msg.fee_satoshis {
				let shutdown_result = ShutdownResult {
					closure_reason: ClosureReason::CooperativeClosure,
					monitor_update: None,
					dropped_outbound_htlcs: Vec::new(),
					unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
					channel_id: self.context.channel_id,
					user_channel_id: self.context.user_id,
					channel_capacity_satoshis: self.context.channel_value_satoshis,
					counterparty_node_id: self.context.counterparty_node_id,
					unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
				};
				let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
				self.context.channel_state = ChannelState::ShutdownComplete;
				self.context.update_time_counter += 1;
				return Ok((None, Some(tx), Some(shutdown_result)));
			}
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		macro_rules! propose_fee {
			($new_fee: expr) => {
				let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
					(closing_tx, $new_fee)
				} else {
					self.build_closing_transaction($new_fee, false)
				};

				return match &self.context.holder_signer {
					ChannelSignerType::Ecdsa(ecdsa) => {
						let sig = ecdsa
							.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
							.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
						let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
							let shutdown_result = ShutdownResult {
								closure_reason: ClosureReason::CooperativeClosure,
								monitor_update: None,
								dropped_outbound_htlcs: Vec::new(),
								unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
								channel_id: self.context.channel_id,
								user_channel_id: self.context.user_id,
								channel_capacity_satoshis: self.context.channel_value_satoshis,
								counterparty_node_id: self.context.counterparty_node_id,
								unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
							};
							self.context.channel_state = ChannelState::ShutdownComplete;
							self.context.update_time_counter += 1;
							let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
							(Some(tx), Some(shutdown_result))
						} else {
							(None, None)
						};

						self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
						Ok((Some(msgs::ClosingSigned {
							channel_id: self.context.channel_id,
							fee_satoshis: used_fee,
							signature: sig,
							fee_range: Some(msgs::ClosingSignedFeeRange {
								min_fee_satoshis: our_min_fee,
								max_fee_satoshis: our_max_fee,
							}),
						}), signed_tx, shutdown_result))
					},
					// TODO (taproot|arik)
					#[cfg(taproot)]
					_ => todo!()
				}
			}
		}

		if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
			if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
				return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
			}
			if max_fee_satoshis < our_min_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
			}
			if min_fee_satoshis > our_max_fee {
				return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
			}

			if !self.context.is_outbound() {
				// They have to pay, so pick the highest fee in the overlapping range.
				// We should never set an upper bound aside from their full balance
				debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
				propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
			} else {
				if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
					return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
						msg.fee_satoshis, our_min_fee, our_max_fee)));
				}
				// The proposed fee is in our acceptable range, accept it and broadcast!
				propose_fee!(msg.fee_satoshis);
			}
		} else {
			// Old fee style negotiation. We don't bother to enforce whether they are complying
			// with the "making progress" requirements, we just comply and hope for the best.
			if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
				if msg.fee_satoshis > last_fee {
					if msg.fee_satoshis < our_max_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee < our_max_fee {
						propose_fee!(our_max_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
					}
				} else {
					if msg.fee_satoshis > our_min_fee {
						propose_fee!(msg.fee_satoshis);
					} else if last_fee > our_min_fee {
						propose_fee!(our_min_fee);
					} else {
						return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
					}
				}
			} else {
				if msg.fee_satoshis < our_min_fee {
					propose_fee!(our_min_fee);
				} else if msg.fee_satoshis > our_max_fee {
					propose_fee!(our_max_fee);
				} else {
					propose_fee!(msg.fee_satoshis);
				}
			}
		}
	}
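
	// A worked example of the modern (fee_range) negotiation above, with hypothetical numbers:
	// suppose our acceptable range is 200-1_000 sat and the peer's closing_signed carries
	// fee_satoshis = 450 with fee_range = 300-2_000 sat.
	//   * As the closer (is_outbound), 450 sat falls inside our 200-1_000 sat range, so we
	//     counter-sign at 450 and negotiation completes in a single round trip.
	//   * As the closee, we instead propose min(their_max, our_max) = min(2_000, 1_000) =
	//     1_000 sat - the highest overlapping fee - since the closer is the one paying it.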
	fn internal_htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
	) -> Result<(), (&'static str, u16)> {
		let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
			.and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
		if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
			(htlc.amount_msat - fee.unwrap()) < amt_to_forward {
			return Err((
				"Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
				0x1000 | 12, // fee_insufficient
			));
		}
		if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
			return Err((
				"Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
				0x1000 | 13, // incorrect_cltv_expiry
			));
		}
		Ok(())
	}

	/// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
	/// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
	/// unsuccessful, falls back to the previous one if one exists.
	pub fn htlc_satisfies_config(
		&self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
	) -> Result<(), (&'static str, u16)> {
		self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
			.or_else(|err| {
				if let Some(prev_config) = self.context.prev_config() {
					self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
				} else {
					Err(err)
				}
			})
	}
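
	// Worked example with hypothetical config values: with forwarding_fee_base_msat = 1_000 and
	// forwarding_fee_proportional_millionths = 100, forwarding amt_to_forward = 1_000_000 msat
	// requires
	//   fee = 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat,
	// so the inbound HTLC must carry amount_msat >= 1_001_100, else we fail it back with
	// fee_insufficient (0x1000 | 12).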
	pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
		self.context.cur_holder_commitment_transaction_number + 1
	}

	pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
	}

	pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
		self.context.cur_counterparty_commitment_transaction_number + 2
	}
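
	// Note the counting direction behind the getters above: the stored cur_* fields count *down*
	// from INITIAL_COMMITMENT_NUMBER (2^48 - 1), holding the index the *next* commitment will
	// use. Hence "+ 1" steps back to the most recently exchanged commitment, and "+ 2" to the
	// one before it, i.e. the latest counterparty commitment that has been revoked.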
	#[cfg(test)]
	pub fn get_signer(&self) -> &ChannelSignerType<SP> {
		&self.context.holder_signer
	}

	#[cfg(test)]
	pub fn get_value_stat(&self) -> ChannelValueStat {
		ChannelValueStat {
			value_to_self_msat: self.context.value_to_self_msat,
			channel_value_msat: self.context.channel_value_satoshis * 1000,
			channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
			pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
			holding_cell_outbound_amount_msat: {
				let mut res = 0;
				for h in self.context.holding_cell_htlc_updates.iter() {
					match h {
						&HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
							res += amount_msat;
						},
						_ => {}
					}
				}
				res
			},
			counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
			counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
		}
	}
	/// Returns true if this channel has been marked as awaiting a monitor update to move forward.
	/// Allowed in any state (including after shutdown)
	pub fn is_awaiting_monitor_update(&self) -> bool {
		self.context.channel_state.is_monitor_update_in_progress()
	}

	/// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
	pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
		if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
		self.context.blocked_monitor_updates[0].update.update_id - 1
	}

	/// Returns the next blocked monitor update, if one exists, and a bool which indicates a
	/// further blocked monitor update exists after the next.
	pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
		if self.context.blocked_monitor_updates.is_empty() { return None; }
		Some((self.context.blocked_monitor_updates.remove(0).update,
			!self.context.blocked_monitor_updates.is_empty()))
	}

	/// Pushes a new monitor update into our monitor update queue, returning it if it should be
	/// immediately given to the user for persisting or `None` if it should be held as blocked.
	fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
	-> Option<ChannelMonitorUpdate> {
		let release_monitor = self.context.blocked_monitor_updates.is_empty();
		if !release_monitor {
			self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
				update,
			});
			None
		} else {
			Some(update)
		}
	}

	pub fn blocked_monitor_updates_pending(&self) -> usize {
		self.context.blocked_monitor_updates.len()
	}
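
	// Queue semantics, with hypothetical update ids: if update 5 was released (queue empty) and
	// updates 6 and 7 arrive while the channel is blocked, both are queued and
	// get_latest_unblocked_monitor_update_id reports 5 (blocked[0].update_id - 1).
	// unblock_next_blocked_monitor_update then yields (update 6, true) - "more remain" - and
	// subsequently (update 7, false).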
	/// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
	/// If the channel is outbound, this implies we have not yet broadcasted the funding
	/// transaction. If the channel is inbound, this implies simply that the channel has not
	/// advanced state.
	pub fn is_awaiting_initial_mon_persist(&self) -> bool {
		if !self.is_awaiting_monitor_update() { return false; }
		if matches!(
			self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
			if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
		) {
			// If we're not a 0conf channel, we'll be waiting on a monitor update with only
			// AwaitingChannelReady set, though our peer could have sent their channel_ready.
			debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
			return true;
		}
		if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
			self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
			// If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
			// waiting for the initial monitor persistence. Thus, we check if our commitment
			// transaction numbers have both been iterated only exactly once (for the
			// funding_signed), and we're awaiting monitor update.
			//
			// If we got here, we shouldn't have yet broadcasted the funding transaction (as the
			// only way to get an awaiting-monitor-update state during initial funding is if the
			// initial monitor persistence is still pending).
			//
			// Because deciding we're awaiting initial broadcast spuriously could result in
			// funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
			// we hard-assert here, even in production builds.
			if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
			assert!(self.context.monitor_pending_channel_ready);
			assert_eq!(self.context.latest_monitor_update_id, 0);
			return true;
		}
		false
	}
	/// Returns true if our channel_ready has been sent
	pub fn is_our_channel_ready(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
			matches!(self.context.channel_state, ChannelState::ChannelReady(_))
	}

	/// Returns true if our peer has either initiated or agreed to shut down the channel.
	pub fn received_shutdown(&self) -> bool {
		self.context.channel_state.is_remote_shutdown_sent()
	}

	/// Returns true if we either initiated or agreed to shut down the channel.
	pub fn sent_shutdown(&self) -> bool {
		self.context.channel_state.is_local_shutdown_sent()
	}

	/// Returns true if this channel is fully shut down. True here implies that no further actions
	/// may/will be taken on this channel, and thus this object should be freed. Any future changes
	/// will be handled appropriately by the chain monitor.
	pub fn is_shutdown(&self) -> bool {
		matches!(self.context.channel_state, ChannelState::ShutdownComplete)
	}

	pub fn channel_update_status(&self) -> ChannelUpdateStatus {
		self.context.channel_update_status
	}

	pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
		self.context.update_time_counter += 1;
		self.context.channel_update_status = status;
	}
	fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
		// Called:
		//  * always when a new block/transactions are confirmed with the new height
		//  * when funding is signed with a height of 0
		if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
			return None;
		}

		let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
		if funding_tx_confirmations <= 0 {
			self.context.funding_tx_confirmation_height = 0;
		}

		if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
			return None;
		}

		// If we're still pending the signature on a funding transaction, then we're not ready to send a
		// channel_ready yet.
		if self.context.signer_pending_funding {
			return None;
		}

		// Note that we don't include ChannelState::WaitingForBatch as we don't want to send
		// channel_ready until the entire batch is ready.
		let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
			self.context.channel_state.set_our_channel_ready();
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
			self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
			self.context.update_time_counter += 1;
			true
		} else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		} else {
			if self.context.funding_tx_confirmation_height != 0 &&
				self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
			{
				// We should never see a funding transaction on-chain until we've received
				// funding_signed (if we're an outbound channel), or seen funding_generated (if we're
				// an inbound channel - before that we have no known funding TXID). The fuzzer,
				// however, may do this and we shouldn't treat it as a bug.
				#[cfg(not(fuzzing))]
				panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
					Do NOT broadcast a funding transaction manually - let LDK do it for you!",
					self.context.channel_state.to_u32());
			}
			// We got a reorg but not enough to trigger a force close, just ignore.
			false
		};

		if need_commitment_update {
			if !self.context.channel_state.is_monitor_update_in_progress() {
				if !self.context.channel_state.is_peer_disconnected() {
					let next_per_commitment_point =
						self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
					return Some(msgs::ChannelReady {
						channel_id: self.context.channel_id,
						next_per_commitment_point,
						short_channel_id_alias: Some(self.context.outbound_scid_alias),
					});
				}
			} else {
				self.context.monitor_pending_channel_ready = true;
			}
		}
		None
	}
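
	// Confirmation counting above is inclusive of the block containing the funding tx: with
	// funding_tx_confirmation_height = 100 and height = 102,
	//   funding_tx_confirmations = 102 - 100 + 1 = 3,
	// so a channel with minimum_depth = 3 becomes ready exactly at height 102. A result <= 0
	// means the funding transaction has been reorged back out of the chain.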
	/// When a transaction is confirmed, we check whether it is or spends the funding
	/// transaction. In the first case, we store the confirmation height and calculate the
	/// short channel id. In the second, we simply return an Err indicating we need to be
	/// force-closed now.
	pub fn transactions_confirmed<NS: Deref, L: Deref>(
		&mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
		chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut msgs = (None, None);
		if let Some(funding_txo) = self.context.get_funding_txo() {
			for &(index_in_block, tx) in txdata.iter() {
				// Check if the transaction is the expected funding transaction, and if it is,
				// check that it pays the right amount to the right script.
				if self.context.funding_tx_confirmation_height == 0 {
					if tx.txid() == funding_txo.txid {
						let txo_idx = funding_txo.index as usize;
						if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
								tx.output[txo_idx].value != self.context.channel_value_satoshis {
							if self.context.is_outbound() {
								// If we generated the funding transaction and it doesn't match what it
								// should, the client is really broken and we should just panic and
								// tell them off. That said, because hash collisions happen with high
								// probability in fuzzing mode, if we're fuzzing we just close the
								// channel and move on.
								#[cfg(not(fuzzing))]
								panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
							}
							self.context.update_time_counter += 1;
							let err_reason = "funding tx had wrong script/value or output index";
							return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
						} else {
							if self.context.is_outbound() {
								if !tx.is_coin_base() {
									for input in tx.input.iter() {
										if input.witness.is_empty() {
											// We generated a malleable funding transaction, implying we've
											// just exposed ourselves to funds loss to our counterparty.
											#[cfg(not(fuzzing))]
											panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
										}
									}
								}
							}
							self.context.funding_tx_confirmation_height = height;
							self.context.funding_tx_confirmed_in = Some(*block_hash);
							self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
								Ok(scid) => Some(scid),
								Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
							};
						}
						// If this is a coinbase transaction and not a 0-conf channel
						// we should update our min_depth to 100 to handle coinbase maturity
						if tx.is_coin_base() &&
							self.context.minimum_depth.unwrap_or(0) > 0 &&
							self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
							self.context.minimum_depth = Some(COINBASE_MATURITY);
						}
					}
					// If we allow 1-conf funding, we may need to check for channel_ready here and
					// send it immediately instead of waiting for a best_block_updated call (which
					// may have already happened for this block).
					if let Some(channel_ready) = self.check_get_channel_ready(height) {
						log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
						let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
						msgs = (Some(channel_ready), announcement_sigs);
					}
				}
				for inp in tx.input.iter() {
					if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
						log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
						return Err(ClosureReason::CommitmentTxConfirmed);
					}
				}
			}
		}
		Ok(msgs)
	}
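
	// `scid_from_parts` above packs the funding location into the BOLT 7 short channel id
	// layout:
	//   scid = (block_height << 40) | (tx_index << 16) | output_index
	// i.e. 3 bytes of height, 3 of transaction index, 2 of output index - hence the panic once
	// any component exceeds its field (2^24, roughly 16.7 million, for height and tx index;
	// 65_535 for the output index).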
	/// When a new block is connected, we check the height of the block against outbound holding
	/// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
	/// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
	/// handled by the ChannelMonitor.
	///
	/// If we return Err, the channel may have been closed, at which point the standard
	/// requirements apply - no calls may be made except those explicitly stated to be allowed
	/// post-shutdown.
	///
	/// May return some HTLCs (and their payment_hash) which have timed out and should be failed
	/// back.
	pub fn best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
		node_signer: &NS, user_config: &UserConfig, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
	}

	fn do_best_block_updated<NS: Deref, L: Deref>(
		&mut self, height: u32, highest_header_time: u32,
		chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
	) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		let mut timed_out_htlcs = Vec::new();
		// This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
		// forward an HTLC when our counterparty should almost certainly just fail it for expiring
		// ~now.
		let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
		self.context.holding_cell_htlc_updates.retain(|htlc_update| {
			match htlc_update {
				&HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
					if *cltv_expiry <= unforwarded_htlc_cltv_limit {
						timed_out_htlcs.push((source.clone(), payment_hash.clone()));
						false
					} else { true }
				},
				_ => true
			}
		});

		self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);

		if let Some(channel_ready) = self.check_get_channel_ready(height) {
			let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
				self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
			} else { None };
			log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
			return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
		}
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_our_channel_ready() {
			let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
			if self.context.funding_tx_confirmation_height == 0 {
				// Note that check_get_channel_ready may reset funding_tx_confirmation_height to
				// zero if it has been reorged out, however in either case, our state flags
				// indicate we've already sent a channel_ready
				funding_tx_confirmations = 0;
			}

			// If we've sent channel_ready (or have both sent and received channel_ready), and
			// the funding transaction has become unconfirmed,
			// close the channel and hope we can get the latest state on chain (because presumably
			// the funding transaction is at least still in the mempool of most nodes).
			//
			// Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
			// 0-conf channel, but not doing so may lead to the
			// `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
			// to.
			if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
				let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
					self.context.minimum_depth.unwrap(), funding_tx_confirmations);
				return Err(ClosureReason::ProcessingError { err: err_reason });
			}
		} else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
				height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
			log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
			// If funding_tx_confirmed_in is unset, the channel must not be active
			assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
			assert!(!self.context.channel_state.is_our_channel_ready());
			return Err(ClosureReason::FundingTimedOut);
		}

		let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
			self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
		} else { None };
		Ok((None, timed_out_htlcs, announcement_sigs))
	}
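
	// Holding-cell timeout example for the retain() above (hypothetical constant value): if
	// LATENCY_GRACE_PERIOD_BLOCKS were 3, then at height 800_000 any held HTLC with
	// cltv_expiry <= 800_003 is dropped and failed back, since by the time it could be
	// committed the next hop would almost certainly reject it as too close to expiry anyway.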
	/// Indicates the funding transaction is no longer confirmed in the main chain. This may
	/// force-close the channel, but may also indicate a harmless reorganization of a block or two
	/// before the channel has reached channel_ready and we can just wait for more blocks.
	pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
		if self.context.funding_tx_confirmation_height != 0 {
			// We handle the funding disconnection by calling best_block_updated with a height one
			// below where our funding was connected, implying a reorg back to conf_height - 1.
			let reorg_height = self.context.funding_tx_confirmation_height - 1;
			// We use the time field to bump the current time we set on channel updates if its
			// larger. If we don't know that time has moved forward, we can just set it to the last
			// time we saw and it will be ignored.
			let best_time = self.context.update_time_counter;
			match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
				Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
					assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
					assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
					assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
					Ok(())
				},
				Err(e) => Err(e)
			}
		} else {
			// We never learned about the funding confirmation anyway, just ignore
			Ok(())
		}
	}
	// Methods to get unprompted messages to send to the remote end (or where we already returned
	// something in the handler for the message that prompted this message):

	/// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
	/// announceable and available for use (have exchanged [`ChannelReady`] messages in both
	/// directions). Should be used for both broadcasted announcements and in response to an
	/// AnnouncementSignatures message from the remote peer.
	///
	/// Will only fail if we're not in a state where channel_announcement may be sent (including
	/// closing).
	///
	/// This will only return ChannelError::Ignore upon failure.
	///
	/// [`ChannelReady`]: crate::ln::msgs::ChannelReady
	fn get_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
	) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if !self.context.config.announced_channel {
			return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
		}
		if !self.context.is_usable() {
			return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
		}

		let short_channel_id = self.context.get_short_channel_id()
			.ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
		let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
			.map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
		let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
		let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();

		let msg = msgs::UnsignedChannelAnnouncement {
			features: channelmanager::provided_channel_features(&user_config),
			chain_hash,
			short_channel_id,
			node_id_1: if were_node_one { node_id } else { counterparty_node_id },
			node_id_2: if were_node_one { counterparty_node_id } else { node_id },
			bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
			bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
			excess_data: Vec::new(),
		};

		Ok(msg)
	}
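
	// BOLT 7 requires node_id_1 to be the lexicographically-lesser of the two compressed node
	// ids - e.g. a node id beginning 0x02... sorts before one beginning 0x03... - so the
	// `were_node_one` comparison above fixes which side's node and funding keys land in the
	// *_1 versus *_2 slots of the announcement (and, later, which signature slots we fill).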
	fn get_announcement_sigs<NS: Deref, L: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
		best_block_height: u32, logger: &L
	) -> Option<msgs::AnnouncementSignatures>
	where
		NS::Target: NodeSigner,
		L::Target: Logger
	{
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}

		if !self.context.is_usable() {
			return None;
		}

		if self.context.channel_state.is_peer_disconnected() {
			log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
			return None;
		}

		if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
			return None;
		}

		log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(a) => a,
			Err(e) => {
				log_trace!(logger, "{:?}", e);
				return None;
			}
		};
		let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
			Err(_) => {
				log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
				return None;
			},
			Ok(v) => v
		};
		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
					Err(_) => {
						log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
						return None;
					},
					Ok(v) => v
				};
				let short_channel_id = match self.context.get_short_channel_id() {
					Some(scid) => scid,
					None => return None,
				};

				self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;

				Some(msgs::AnnouncementSignatures {
					channel_id: self.context.channel_id(),
					short_channel_id,
					node_signature: our_node_sig,
					bitcoin_signature: our_bitcoin_sig,
				})
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}
	/// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
	/// available.
	fn sign_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
			let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
				.map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
			let were_node_one = announcement.node_id_1 == our_node_key;

			let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
				.map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
			match &self.context.holder_signer {
				ChannelSignerType::Ecdsa(ecdsa) => {
					let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
						.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
					Ok(msgs::ChannelAnnouncement {
						node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
						node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
						bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
						bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
						contents: announcement,
					})
				},
				// TODO (taproot|arik)
				#[cfg(taproot)]
				_ => todo!()
			}
		} else {
			Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
		}
	}
	/// Processes an incoming announcement_signatures message, providing a fully-signed
	/// channel_announcement message which we can broadcast and storing our counterparty's
	/// signatures for later reconstruction/rebroadcast of the channel_announcement.
	pub fn announcement_signatures<NS: Deref>(
		&mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
		msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
	) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
		let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;

		let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);

		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
				&announcement, self.context.get_counterparty_node_id())));
		}
		if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
			return Err(ChannelError::Close(format!(
				"Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
				&announcement, self.context.counterparty_funding_pubkey())));
		}

		self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return Err(ChannelError::Ignore(
				"Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
		}

		self.sign_channel_announcement(node_signer, announcement)
	}
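
	// Confirmation arithmetic for the depth check above: with
	// funding_tx_confirmation_height = 100, the `+ 5 > best_block_height` test stops rejecting
	// once best_block_height >= 105, i.e. once the funding transaction has
	// 105 - 100 + 1 = 6 confirmations - the six-confirmation rule BOLT 7 requires before a
	// channel may be announced.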
	/// Gets a signed channel_announcement for this channel, if we previously received an
	/// announcement_signatures from our counterparty.
	pub fn get_signed_channel_announcement<NS: Deref>(
		&self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
	) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
		if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
			return None;
		}
		let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
			Ok(res) => res,
			Err(_) => return None,
		};
		match self.sign_channel_announcement(node_signer, announcement) {
			Ok(res) => Some(res),
			Err(_) => None,
		}
	}
	/// May panic if called on a channel that wasn't immediately-previously
	/// self.remove_uncommitted_htlcs_and_mark_paused()'d
	pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
		assert!(self.context.channel_state.is_peer_disconnected());
		assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
		// Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
		// current to_remote balances. However, it no longer has any use, and thus is now simply
		// set to a dummy (but valid, as required by the spec) public key.
		// fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
		// branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
		// valid, and valid in fuzzing mode's arbitrary validity criteria:
		let mut pk = [2; 33]; pk[1] = 0xff;
		let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
		let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
			let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
			log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
			remote_last_secret
		} else {
			log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
			[0;32]
		};
		self.mark_awaiting_response();
		msgs::ChannelReestablish {
			channel_id: self.context.channel_id(),
			// The protocol has two different commitment number concepts - the "commitment
			// transaction number", which starts from 0 and counts up, and the "revocation key
			// index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
			// commitment transaction numbers by the index which will be used to reveal the
			// revocation key for that commitment transaction, which means we have to convert them
			// to protocol-level commitment numbers here...

			// next_local_commitment_number is the next commitment_signed number we expect to
			// receive (indicating if they need to resend one that we missed).
			next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
			// We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
			// receive, however we track it by the next commitment number for a remote transaction
			// (which is one further, as they always revoke previous commitment transaction, not
			// the one we send) so we have to decrement by 1. Note that if
			// cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
			// dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
			// overflow here.
			next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
			your_last_per_commitment_secret: remote_last_secret,
			my_current_per_commitment_point: dummy_pubkey,
			// TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
			// construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
			// txid of that interactive transaction, else we MUST NOT set it.
			next_funding_txid: None,
		}
	}
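
	// Worked example of the conversion above: for a freshly-funded channel both cur_*_
	// commitment_transaction_number fields sit at INITIAL_COMMITMENT_NUMBER - 1, so the message
	// carries next_local_commitment_number = 1 (we expect commitment number 1 next) and
	// next_remote_commitment_number = 0 (we have not yet received a revocation), matching
	// BOLT 2's channel_reestablish semantics.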
	// Send stuff to our remote peers:

	/// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
	/// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
	/// commitment update.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	pub fn queue_add_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<(), ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		self
			.send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
				skimmed_fee_msat, blinding_point, fee_estimator, logger)
			.map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
			.map_err(|err| {
				if let ChannelError::Ignore(_) = err { /* fine */ }
				else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
				err
			})
	}
	/// Adds a pending outbound HTLC to this channel; note that you probably want
	/// [`Self::send_htlc_and_commit`] instead, because you'll want both messages at once.
	///
	/// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
	/// the wire:
	/// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
	///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
	///   in flight.
	/// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
	///   we may not yet have sent the previous commitment update messages and will need to
	///   regenerate them.
	///
	/// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
	/// on this [`Channel`] if `force_holding_cell` is false.
	///
	/// `Err`s will only be [`ChannelError::Ignore`].
	fn send_htlc<F: Deref, L: Deref>(
		&mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
		skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
	) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
	where F::Target: FeeEstimator, L::Target: Logger
	{
		if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
			self.context.channel_state.is_local_shutdown_sent() ||
			self.context.channel_state.is_remote_shutdown_sent()
		{
			return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
		}
		let channel_total_msat = self.context.channel_value_satoshis * 1000;
		if amount_msat > channel_total_msat {
			return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
		}

		if amount_msat == 0 {
			return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
		}

		let available_balances = self.context.get_available_balances(fee_estimator);
		if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
			return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
				available_balances.next_outbound_htlc_minimum_msat)));
		}

		if amount_msat > available_balances.next_outbound_htlc_limit_msat {
			return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
				available_balances.next_outbound_htlc_limit_msat)));
		}

		if self.context.channel_state.is_peer_disconnected() {
			// Note that this should never really happen: being !is_live() on receipt of an
			// incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
			// the user to send directly into a !is_live() channel. However, if we
			// disconnected during the time the previous hop was doing the commitment dance we may
			// end up getting here after the forwarding delay. In any case, returning an
			// IgnoreError will get ChannelManager to do the right thing and fail backwards now.
			return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
		}

		let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
		log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
			payment_hash, amount_msat,
			if force_holding_cell { "into holding cell" }
			else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
			else { "to peer" });

		if need_holding_cell {
			force_holding_cell = true;
		}

		// Now update local state:
		if force_holding_cell {
			self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
				amount_msat,
				payment_hash,
				cltv_expiry,
				source,
				onion_routing_packet,
				skimmed_fee_msat,
				blinding_point,
			});
			return Ok(None);
		}

		self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash: payment_hash.clone(),
			cltv_expiry,
			state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
			source,
			blinding_point,
			skimmed_fee_msat,
		});

		let res = msgs::UpdateAddHTLC {
			channel_id: self.context.channel_id,
			htlc_id: self.context.next_holder_htlc_id,
			amount_msat,
			payment_hash,
			cltv_expiry,
			onion_routing_packet,
			skimmed_fee_msat,
			blinding_point,
		};
		self.context.next_holder_htlc_id += 1;

		Ok(Some(res))
	}
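
	// Routing summary for the above: an HTLC lands in the holding cell either because the
	// caller forced it there (as queue_add_htlc does) or because can_generate_new_commitment()
	// is false (we're awaiting a revoke_and_ack or a monitor update); otherwise it is pushed
	// straight onto pending_outbound_htlcs as LocalAnnounced and the update_add_htlc message is
	// returned for immediate sending.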
	fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
		log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
		// We can upgrade the status of some HTLCs that are waiting on a commitment; even if we
		// fail to generate this commitment, we're still at least at a position where upgrading
		// their status is acceptable.
		for htlc in self.context.pending_inbound_htlcs.iter_mut() {
			let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
				Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
			} else { None };
			if let Some(state) = new_state {
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
				htlc.state = state;
			}
		}
		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
				log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
				// Grab the preimage, if it exists, instead of cloning
				let mut reason = OutboundHTLCOutcome::Success(None);
				mem::swap(outcome, &mut reason);
				htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
			}
		}
		if let Some((feerate, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
				debug_assert!(!self.context.is_outbound());
				log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
				self.context.feerate_per_kw = feerate;
				self.context.pending_update_fee = None;
			}
		}
		self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;

		let (mut htlcs_ref, counterparty_commitment_tx) =
			self.build_commitment_no_state_update(logger);
		let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
		let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
			htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
			self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
		}

		self.context.latest_monitor_update_id += 1;
		let monitor_update = ChannelMonitorUpdate {
			update_id: self.context.latest_monitor_update_id,
			counterparty_node_id: Some(self.context.counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
				commitment_txid: counterparty_commitment_txid,
				htlc_outputs: htlcs.clone(),
				commitment_number: self.context.cur_counterparty_commitment_transaction_number,
				their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
				feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
				to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
				to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
			}]
		};
		self.context.channel_state.set_awaiting_remote_revoke();
		monitor_update
	}
	fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
	-> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
	where L::Target: Logger
	{
		let counterparty_keys = self.context.build_remote_transaction_keys();
		let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
		let counterparty_commitment_tx = commitment_stats.tx;

		#[cfg(any(test, fuzzing))]
		{
			if !self.context.is_outbound() {
				let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
				*self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
				if let Some(info) = projected_commit_tx_info {
					let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
					if info.total_pending_htlcs == total_pending_htlcs
						&& info.next_holder_htlc_id == self.context.next_holder_htlc_id
						&& info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
						&& info.feerate == self.context.feerate_per_kw {
							let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
							assert_eq!(actual_fee, info.fee);
					}
				}
			}
		}

		(commitment_stats.htlcs_included, counterparty_commitment_tx)
	}
6011 /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6012 /// generation when we shouldn't change HTLC/channel state.
6013 fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6014 // Get the fee tests from `build_commitment_no_state_update`
6015 #[cfg(any(test, fuzzing))]
6016 self.build_commitment_no_state_update(logger);
6018 let counterparty_keys = self.context.build_remote_transaction_keys();
6019 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6020 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6022 match &self.context.holder_signer {
6023 ChannelSignerType::Ecdsa(ecdsa) => {
6024 let (signature, htlc_signatures);
6027 let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6028 for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6029 htlcs.push(htlc);
6030 }
6032 let res = ecdsa.sign_counterparty_commitment(
6033 &commitment_stats.tx,
6034 commitment_stats.inbound_htlc_preimages,
6035 commitment_stats.outbound_htlc_preimages,
6036 &self.context.secp_ctx,
6037 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6038 signature = res.0;
6039 htlc_signatures = res.1;
6041 log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6042 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6043 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6044 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6046 for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6047 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6048 encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6049 encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6050 log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6051 log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
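// Second-stage HTLC transactions signed here pay fees at the commitment feerate as
// well. As an illustrative guide (BOLT #3 weights, non-anchors; with
// anchors_zero_fee_htlc_tx the HTLC txs are zero-fee instead): an HTLC-timeout tx
// weighs 663 WU and an HTLC-success tx 703 WU, so at 2500 sat/kW they pay 1657 and
// 1757 sats respectively (truncating).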
6055 Ok((msgs::CommitmentSigned {
6056 channel_id: self.context.channel_id,
6060 partial_signature_with_nonce: None,
6061 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6063 // TODO (taproot|arik)
6069 /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6070 /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6072 /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update; see the docs on
6073 /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
6074 pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6075 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6076 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6077 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6078 ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6079 where F::Target: FeeEstimator, L::Target: Logger
6081 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6082 onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6083 if let Err(e) = &send_res { debug_assert!(matches!(e, ChannelError::Ignore(_)), "Sending cannot trigger channel failure"); }
6086 let monitor_update = self.build_commitment_no_status_check(logger);
6087 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6088 Ok(self.push_ret_blockable_mon_update(monitor_update))
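// A minimal caller sketch (hypothetical bindings `chan`, `fee_est`, etc.; not an API
// defined here). Any returned `ChannelMonitorUpdate` must be persisted before the
// resulting commitment_signed is released to the peer:
//   match chan.send_htlc_and_commit(amt_msat, payment_hash, cltv_expiry, source,
//       onion_packet, None, &fee_est, &logger) {
//       Ok(Some(_monitor_update)) => { /* persist, then generate + send messages */ },
//       Ok(None) => { /* HTLC went to the holding cell; nothing to send yet */ },
//       Err(ChannelError::Ignore(_)) => { /* fail this payment; the channel is fine */ },
//       Err(_) => unreachable!("see the debug_assert above"),
//   }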
6094 /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
6095 /// occurred.
6096 pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6097 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6098 fee_base_msat: msg.contents.fee_base_msat,
6099 fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6100 cltv_expiry_delta: msg.contents.cltv_expiry_delta
6102 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6104 self.context.counterparty_forwarding_info = new_forwarding_info;
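// Illustrative use of the stored forwarding parameters. The formula is BOLT #7's
// forwarding fee; the values below are hypothetical:
#[cfg(test)]
fn _example_forwarding_fee_msat() {
let (fee_base_msat, fee_proportional_millionths): (u64, u64) = (1_000, 100);
let amt_to_forward_msat: u64 = 1_000_000;
// fee = base + amount * proportional / 1_000_000
let fee = fee_base_msat + amt_to_forward_msat * fee_proportional_millionths / 1_000_000;
assert_eq!(fee, 1_100);
}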
6110 /// Begins the shutdown process, getting a message for the remote peer and returning all
6111 /// holding cell HTLCs for payment failure.
6112 pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6113 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6114 -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6116 for htlc in self.context.pending_outbound_htlcs.iter() {
6117 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6118 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6121 if self.context.channel_state.is_local_shutdown_sent() {
6122 return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6124 else if self.context.channel_state.is_remote_shutdown_sent() {
6125 return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6127 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6128 return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6130 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6131 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6132 return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6135 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6138 // use override shutdown script if provided
6139 let shutdown_scriptpubkey = match override_shutdown_script {
6140 Some(script) => script,
6142 // otherwise, use the shutdown scriptpubkey provided by the signer
6143 match signer_provider.get_shutdown_scriptpubkey() {
6144 Ok(scriptpubkey) => scriptpubkey,
6145 Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6149 if !shutdown_scriptpubkey.is_compatible(their_features) {
6150 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6152 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6157 // From here on out, we may not fail!
6158 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6159 self.context.channel_state.set_local_shutdown_sent();
6160 self.context.update_time_counter += 1;
6162 let monitor_update = if update_shutdown_script {
6163 self.context.latest_monitor_update_id += 1;
6164 let monitor_update = ChannelMonitorUpdate {
6165 update_id: self.context.latest_monitor_update_id,
6166 counterparty_node_id: Some(self.context.counterparty_node_id),
6167 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6168 scriptpubkey: self.get_closing_scriptpubkey(),
6171 self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6172 self.push_ret_blockable_mon_update(monitor_update)
6174 let shutdown = msgs::Shutdown {
6175 channel_id: self.context.channel_id,
6176 scriptpubkey: self.get_closing_scriptpubkey(),
6179 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6180 // our shutdown until we've committed all of the pending changes.
6181 self.context.holding_cell_update_fee = None;
6182 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6183 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6185 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6186 dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6193 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6194 "we can't both complete shutdown and return a monitor update");
6196 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
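// Caller sketch (hypothetical bindings; not an API defined here): send the returned
// `Shutdown` to the peer, persist the monitor update if one was produced, and fail
// every entry in `dropped_outbound_htlcs` backwards:
//   let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
//       chan.get_shutdown(&signer_provider, &their_features, None, None)?;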
6199 pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6200 self.context.holding_cell_htlc_updates.iter()
6201 .flat_map(|htlc_update| {
6203 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6204 => Some((source, payment_hash)),
6208 .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6212 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6213 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6214 pub context: ChannelContext<SP>,
6215 pub unfunded_context: UnfundedChannelContext,
6218 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6219 pub fn new<ES: Deref, F: Deref>(
6220 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6221 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6222 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6223 ) -> Result<OutboundV1Channel<SP>, APIError>
6224 where ES::Target: EntropySource,
6225 F::Target: FeeEstimator
6227 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6228 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6229 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6230 let pubkeys = holder_signer.pubkeys().clone();
6232 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6233 return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6235 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6236 return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6238 let channel_value_msat = channel_value_satoshis * 1000;
6239 if push_msat > channel_value_msat {
6240 return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6242 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6243 return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6245 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6246 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6247 // Protocol-level safety check; this should never happen because
6248 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` prevents it.
6249 return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve ({}) is below the implementation dust limit", holder_selected_channel_reserve_satoshis) });
6252 let channel_type = Self::get_initial_channel_type(&config, their_features);
6253 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6255 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6256 (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6258 (ConfirmationTarget::NonAnchorChannelFee, 0)
6260 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6262 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6263 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6264 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6265 return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
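// Illustrative affordability math (hypothetical numbers; weights per BOLT #3, and
// assuming the minimum affordable HTLC count used above is 4): an anchors channel at
// 1000 sat/kW needs (1124 + 172 * 4) WU * 1000 / 1000 = 1812 sats of commitment fee,
// plus 2 * 330 sats of anchor outputs, so the funder must retain at least 2472 sats.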
6268 let mut secp_ctx = Secp256k1::new();
6269 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6271 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6272 match signer_provider.get_shutdown_scriptpubkey() {
6273 Ok(scriptpubkey) => Some(scriptpubkey),
6274 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6278 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6279 if !shutdown_scriptpubkey.is_compatible(&their_features) {
6280 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6284 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6285 Ok(script) => script,
6286 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6289 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6292 context: ChannelContext {
6295 config: LegacyChannelConfig {
6296 options: config.channel_config.clone(),
6297 announced_channel: config.channel_handshake_config.announced_channel,
6298 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6303 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6305 channel_id: temporary_channel_id,
6306 temporary_channel_id: Some(temporary_channel_id),
6307 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6308 announcement_sigs_state: AnnouncementSigsState::NotSent,
6310 channel_value_satoshis,
6312 latest_monitor_update_id: 0,
6314 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6315 shutdown_scriptpubkey,
6318 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6319 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6322 pending_inbound_htlcs: Vec::new(),
6323 pending_outbound_htlcs: Vec::new(),
6324 holding_cell_htlc_updates: Vec::new(),
6325 pending_update_fee: None,
6326 holding_cell_update_fee: None,
6327 next_holder_htlc_id: 0,
6328 next_counterparty_htlc_id: 0,
6329 update_time_counter: 1,
6331 resend_order: RAACommitmentOrder::CommitmentFirst,
6333 monitor_pending_channel_ready: false,
6334 monitor_pending_revoke_and_ack: false,
6335 monitor_pending_commitment_signed: false,
6336 monitor_pending_forwards: Vec::new(),
6337 monitor_pending_failures: Vec::new(),
6338 monitor_pending_finalized_fulfills: Vec::new(),
6340 signer_pending_commitment_update: false,
6341 signer_pending_funding: false,
6343 #[cfg(debug_assertions)]
6344 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6345 #[cfg(debug_assertions)]
6346 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6348 last_sent_closing_fee: None,
6349 pending_counterparty_closing_signed: None,
6350 expecting_peer_commitment_signed: false,
6351 closing_fee_limits: None,
6352 target_closing_feerate_sats_per_kw: None,
6354 funding_tx_confirmed_in: None,
6355 funding_tx_confirmation_height: 0,
6356 short_channel_id: None,
6357 channel_creation_height: current_chain_height,
6359 feerate_per_kw: commitment_feerate,
6360 counterparty_dust_limit_satoshis: 0,
6361 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6362 counterparty_max_htlc_value_in_flight_msat: 0,
6363 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6364 counterparty_selected_channel_reserve_satoshis: None, // Filled in when we receive accept_channel
6365 holder_selected_channel_reserve_satoshis,
6366 counterparty_htlc_minimum_msat: 0,
6367 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6368 counterparty_max_accepted_htlcs: 0,
6369 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6370 minimum_depth: None, // Filled in when we receive accept_channel
6372 counterparty_forwarding_info: None,
6374 channel_transaction_parameters: ChannelTransactionParameters {
6375 holder_pubkeys: pubkeys,
6376 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6377 is_outbound_from_holder: true,
6378 counterparty_parameters: None,
6379 funding_outpoint: None,
6380 channel_type_features: channel_type.clone()
6382 funding_transaction: None,
6383 is_batch_funding: None,
6385 counterparty_cur_commitment_point: None,
6386 counterparty_prev_commitment_point: None,
6387 counterparty_node_id,
6389 counterparty_shutdown_scriptpubkey: None,
6391 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6393 channel_update_status: ChannelUpdateStatus::Enabled,
6394 closing_signed_in_flight: false,
6396 announcement_sigs: None,
6398 #[cfg(any(test, fuzzing))]
6399 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6400 #[cfg(any(test, fuzzing))]
6401 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6403 workaround_lnd_bug_4006: None,
6404 sent_message_awaiting_response: None,
6406 latest_inbound_scid_alias: None,
6407 outbound_scid_alias,
6409 channel_pending_event_emitted: false,
6410 channel_ready_event_emitted: false,
6412 #[cfg(any(test, fuzzing))]
6413 historical_inbound_htlc_fulfills: HashSet::new(),
6418 blocked_monitor_updates: Vec::new(),
6420 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6424 /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6425 fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6426 let counterparty_keys = self.context.build_remote_transaction_keys();
6427 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6428 let signature = match &self.context.holder_signer {
6429 // TODO (taproot|arik): move match into calling method for Taproot
6430 ChannelSignerType::Ecdsa(ecdsa) => {
6431 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6432 .map(|(sig, _)| sig).ok()?
6434 // TODO (taproot|arik)
6439 if self.context.signer_pending_funding {
6440 log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6441 self.context.signer_pending_funding = false;
6444 Some(msgs::FundingCreated {
6445 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6446 funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6447 funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6450 partial_signature_with_nonce: None,
6452 next_local_nonce: None,
6456 /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6457 /// a funding_created message for the remote peer.
6458 /// Panics if called at some time other than immediately after initial handshake, if called twice,
6459 /// or if called on an inbound channel.
6460 /// Note that channel_id changes during this call!
6461 /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6462 /// If an Err is returned, it is a ChannelError::Close.
6463 pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6464 -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6465 if !self.context.is_outbound() {
6466 panic!("Tried to create outbound funding_created message on an inbound channel!");
6468 if !matches!(
6469 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6470 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6471 ) {
6472 panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6474 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6475 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6476 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6477 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6480 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6481 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6483 // Now that we're past error-generating stuff, update our local state:
6485 self.context.channel_state = ChannelState::FundingNegotiated;
6486 self.context.channel_id = funding_txo.to_channel_id();
6488 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6489 // We can skip this if it is a zero-conf channel.
6490 if funding_transaction.is_coin_base() &&
6491 self.context.minimum_depth.unwrap_or(0) > 0 &&
6492 self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6493 self.context.minimum_depth = Some(COINBASE_MATURITY);
6496 self.context.funding_transaction = Some(funding_transaction);
6497 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
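// (`Some(()).filter(|_| is_batch_funding)` maps the bool onto the `Option<()>` the
// context stores: `Some(())` iff this channel is part of a batch funding.)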
6499 let funding_created = self.get_funding_created_msg(logger);
6500 if funding_created.is_none() {
6501 #[cfg(not(async_signing))] {
6502 panic!("Failed to get signature for new funding creation");
6504 #[cfg(async_signing)] {
6505 if !self.context.signer_pending_funding {
6506 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6507 self.context.signer_pending_funding = true;
6515 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6516 // The default channel type (ie the first one we try) depends on whether the channel is
6517 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6518 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6519 // with no other changes, and fall back to `only_static_remotekey`.
6520 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6521 if !config.channel_handshake_config.announced_channel &&
6522 config.channel_handshake_config.negotiate_scid_privacy &&
6523 their_features.supports_scid_privacy() {
6524 ret.set_scid_privacy_required();
6527 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6528 // set it now. If they don't understand it, we'll fall back to our default of
6529 // `only_static_remotekey`.
6530 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6531 their_features.supports_anchors_zero_fee_htlc_tx() {
6532 ret.set_anchors_zero_fee_htlc_tx_required();
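// E.g. (illustrative): a private channel where both peers advertise scid_privacy and
// anchors_zero_fee_htlc_tx starts at `static_remote_key|scid_privacy|anchors_zero_fee_htlc_tx`
// (all as required bits); a public channel with no anchor preference stays at plain
// `static_remote_key`.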
6538 /// If we receive an error message, it may only be a rejection of the channel type we tried,
6539 /// not of our ability to open any channel at all. Thus, on error, we should first call this
6540 /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6541 pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6542 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6543 ) -> Result<msgs::OpenChannel, ()>
6545 F::Target: FeeEstimator
6547 if !self.context.is_outbound() ||
6548 !matches!(
6549 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6550 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6551 )
6552 {
6553 return Err(());
6554 }
6555 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6556 // We've exhausted our options
6559 // We support opening a few different types of channels. Try removing our additional
6560 // features one by one until we've either arrived at our default or the counterparty has
6561 // accepted.
6563 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6564 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6565 // checks whether the counterparty supports every feature, this would only happen if the
6566 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6567 // no reason.
6568 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6569 self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6570 self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6571 assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6572 } else if self.context.channel_type.supports_scid_privacy() {
6573 self.context.channel_type.clear_scid_privacy();
6575 self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6577 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6578 Ok(self.get_open_channel(chain_hash))
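// Retry sketch (hypothetical caller): when the peer errors an unfunded outbound
// channel, strip a feature and re-propose instead of failing outright:
//   match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
//       Ok(open_channel_msg) => { /* re-send open_channel with fewer features */ },
//       Err(()) => { /* nothing left to strip; fail the channel */ },
//   }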
6581 pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6582 if !self.context.is_outbound() {
6583 panic!("Tried to open a channel for an inbound channel?");
6585 if self.context.have_received_message() {
6586 panic!("Cannot generate an open_channel after we've moved forward");
6589 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6590 panic!("Tried to send an open_channel for a channel that has already advanced");
6593 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6594 let keys = self.context.get_holder_pubkeys();
6598 temporary_channel_id: self.context.channel_id,
6599 funding_satoshis: self.context.channel_value_satoshis,
6600 push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6601 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6602 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6603 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6604 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6605 feerate_per_kw: self.context.feerate_per_kw as u32,
6606 to_self_delay: self.context.get_holder_selected_contest_delay(),
6607 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6608 funding_pubkey: keys.funding_pubkey,
6609 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6610 payment_point: keys.payment_point,
6611 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6612 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6613 first_per_commitment_point,
6614 channel_flags: if self.context.config.announced_channel {1} else {0},
6615 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6616 Some(script) => script.clone().into_inner(),
6617 None => Builder::new().into_script(),
6619 channel_type: Some(self.context.channel_type.clone()),
6624 pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6625 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6627 // Check sanity of message fields:
6628 if !self.context.is_outbound() {
6629 return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6631 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6632 return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6634 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6635 return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6637 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6638 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6640 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6641 return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6643 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6644 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6645 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6647 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6648 if msg.htlc_minimum_msat >= full_channel_value_msat {
6649 return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6651 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6652 if msg.to_self_delay > max_delay_acceptable {
6653 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6655 if msg.max_accepted_htlcs < 1 {
6656 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6658 if msg.max_accepted_htlcs > MAX_HTLCS {
6659 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6662 // Now check against optional parameters as set by config...
6663 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6664 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6666 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6667 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6669 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6670 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6672 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6673 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6675 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6676 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6678 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6679 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6681 if msg.minimum_depth > peer_limits.max_minimum_depth {
6682 return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Our upper limit: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6685 if let Some(ty) = &msg.channel_type {
6686 if *ty != self.context.channel_type {
6687 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6689 } else if their_features.supports_channel_type() {
6690 // Assume they've accepted the channel type as they said they understand it.
6692 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6693 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6694 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6696 self.context.channel_type = channel_type.clone();
6697 self.context.channel_transaction_parameters.channel_type_features = channel_type;
6700 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6701 match &msg.shutdown_scriptpubkey {
6702 &Some(ref script) => {
6703 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
6704 if script.len() == 0 {
6707 if !script::is_bolt2_compliant(&script, their_features) {
6708 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6710 Some(script.clone())
6713 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy; we fail the channel
6715 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
6720 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6721 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6722 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6723 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6724 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6726 if peer_limits.trust_own_funding_0conf {
6727 self.context.minimum_depth = Some(msg.minimum_depth);
6729 self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6732 let counterparty_pubkeys = ChannelPublicKeys {
6733 funding_pubkey: msg.funding_pubkey,
6734 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6735 payment_point: msg.payment_point,
6736 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6737 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6740 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6741 selected_contest_delay: msg.to_self_delay,
6742 pubkeys: counterparty_pubkeys,
6745 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6746 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6748 self.context.channel_state = ChannelState::NegotiatingFunding(
6749 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6751 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6756 /// Handles a funding_signed message from the remote end.
6757 /// If this call is successful, broadcast the funding transaction (and not before!)
6758 pub fn funding_signed<L: Deref>(
6759 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6760 ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6764 if !self.context.is_outbound() {
6765 return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6767 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6768 return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6770 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6771 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6772 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6773 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6776 let funding_script = self.context.get_funding_redeemscript();
6778 let counterparty_keys = self.context.build_remote_transaction_keys();
6779 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6780 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6781 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6783 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6784 &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6786 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6787 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6789 let trusted_tx = initial_commitment_tx.trust();
6790 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6791 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6792 // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6793 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6794 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6798 let holder_commitment_tx = HolderCommitmentTransaction::new(
6799 initial_commitment_tx,
6800 msg.signature,
6801 Vec::new(),
6802 &self.context.get_holder_pubkeys().funding_pubkey,
6803 self.context.counterparty_funding_pubkey()
6806 let validated =
6807 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6808 if validated.is_err() {
6809 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6812 let funding_redeemscript = self.context.get_funding_redeemscript();
6813 let funding_txo = self.context.get_funding_txo().unwrap();
6814 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6815 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
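// Per BOLT #3 the obscure factor is the lower 48 bits of
// SHA256(open_channel payment_basepoint || accept_channel payment_basepoint); it is
// XORed with the commitment number before that number is spread across the
// commitment tx's locktime and sequence fields, hiding the channel's age on-chain.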
6816 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6817 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6818 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6819 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6820 shutdown_script, self.context.get_holder_selected_contest_delay(),
6821 &self.context.destination_script, (funding_txo, funding_txo_script),
6822 &self.context.channel_transaction_parameters,
6823 funding_redeemscript.clone(), self.context.channel_value_satoshis,
6825 holder_commitment_tx, best_block, self.context.counterparty_node_id);
6826 channel_monitor.provide_initial_counterparty_commitment_tx(
6827 counterparty_initial_bitcoin_tx.txid, Vec::new(),
6828 self.context.cur_counterparty_commitment_transaction_number,
6829 self.context.counterparty_cur_commitment_point.unwrap(),
6830 counterparty_initial_commitment_tx.feerate_per_kw(),
6831 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6832 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6834 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet, so no update can be in progress!
6835 if self.context.is_batch_funding() {
6836 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6838 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6840 self.context.cur_holder_commitment_transaction_number -= 1;
6841 self.context.cur_counterparty_commitment_transaction_number -= 1;
6843 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6845 let mut channel = Channel { context: self.context };
6847 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6848 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6849 Ok((channel, channel_monitor))
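// Caller sketch (hypothetical bindings): only after this returns Ok is it safe to
// broadcast the funding transaction, and the new monitor must be persisted first:
//   let (chan, monitor) = pending.funding_signed(&msg, best_block, &signer_provider,
//       &logger).map_err(|(_chan, e)| e)?;
//   // persist `monitor`, then broadcast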
6852 /// Indicates that the signer may have some signatures for us, so we should retry if we're
6853 /// stuck.
6854 #[cfg(async_signing)]
6855 pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6856 if self.context.signer_pending_funding && self.context.is_outbound() {
6857 log_trace!(logger, "Signer unblocked a funding_created");
6858 self.get_funding_created_msg(logger)
6863 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6864 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6865 pub context: ChannelContext<SP>,
6866 pub unfunded_context: UnfundedChannelContext,
6869 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
6870 /// [`msgs::OpenChannel`].
6871 pub(super) fn channel_type_from_open_channel(
6872 msg: &msgs::OpenChannel, their_features: &InitFeatures,
6873 our_supported_features: &ChannelTypeFeatures
6874 ) -> Result<ChannelTypeFeatures, ChannelError> {
6875 if let Some(channel_type) = &msg.channel_type {
6876 if channel_type.supports_any_optional_bits() {
6877 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6880 // We only support the channel types defined by the `ChannelManager` in
6881 // `provided_channel_type_features`. The channel type must always support
6882 // `static_remote_key`.
6883 if !channel_type.requires_static_remote_key() {
6884 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6886 // Make sure we support all of the features behind the channel type.
6887 if !channel_type.is_subset(our_supported_features) {
6888 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6890 let announced_channel = (msg.channel_flags & 1) == 1;
6891 if channel_type.requires_scid_privacy() && announced_channel {
6892 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6894 Ok(channel_type.clone())
6896 let channel_type = ChannelTypeFeatures::from_init(&their_features);
6897 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6898 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6904 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6905 /// Creates a new channel from a remote side's request for one.
6906 /// Assumes chain_hash has already been checked and corresponds with what we expect!
6907 pub fn new<ES: Deref, F: Deref, L: Deref>(
6908 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6909 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6910 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6911 current_chain_height: u32, logger: &L, is_0conf: bool,
6912 ) -> Result<InboundV1Channel<SP>, ChannelError>
6913 where ES::Target: EntropySource,
6914 F::Target: FeeEstimator,
6917 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6918 let announced_channel = (msg.channel_flags & 1) == 1;
6920 // First check the channel type is known, failing before we do anything else if we don't
6921 // support this channel type.
6922 let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
6924 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6925 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6926 let pubkeys = holder_signer.pubkeys().clone();
6927 let counterparty_pubkeys = ChannelPublicKeys {
6928 funding_pubkey: msg.funding_pubkey,
6929 revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6930 payment_point: msg.payment_point,
6931 delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6932 htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6935 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6936 return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6939 // Check sanity of message fields:
6940 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6941 return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6943 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6944 return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6946 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6947 return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6949 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6950 if msg.push_msat > full_channel_value_msat {
6951 return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6953 if msg.dust_limit_satoshis > msg.funding_satoshis {
6954 return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6956 if msg.htlc_minimum_msat >= full_channel_value_msat {
6957 return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6959 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6961 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6962 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6963 return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6965 if msg.max_accepted_htlcs < 1 {
6966 return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6968 if msg.max_accepted_htlcs > MAX_HTLCS {
6969 return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6972 // Now check against optional parameters as set by config...
6973 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6974 return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6976 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6977 return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
6979 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6980 return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6982 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6983 return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6985 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6986 return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6988 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6989 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6991 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6992 return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6995 // Convert things into internal flags and prep our state:
6997 if config.channel_handshake_limits.force_announced_channel_preference {
6998 if config.channel_handshake_config.announced_channel != announced_channel {
6999 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
7003 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
7004 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7005 // Protocol-level safety check; this should never happen because
7006 // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` prevents it.
7007 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7009 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
7010 return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
7012 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7013 log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
7014 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
7016 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
7017 return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
7020 // check if the funder's amount for the initial commitment tx is sufficient
7021 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
7022 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
7023 ANCHOR_OUTPUT_VALUE_SATOSHI * 2
7027 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
7028 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
7029 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
7030 return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the fee for the initial commitment transaction ({} sats).", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
7033 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
7034 // While it's reasonable for us to not meet the channel reserve initially (if they don't
7035 // want to push much to us), our counterparty should always have more than our reserve.
7036 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
7037 return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
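// E.g. (illustrative numbers): funding 100_000 sats with push_msat = 0 at 253 sat/kW
// on a non-anchors channel gives a fee of (724 + 172 * 4) * 253 / 1000 = 357 sats,
// leaving to_remote = 99_643 sats, comfortably above a ~1% reserve of 1_000 sats
// (assuming a minimum affordable HTLC count of 4 and a 1% reserve configuration).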
7040 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7041 match &msg.shutdown_scriptpubkey {
7042 &Some(ref script) => {
7043 // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7044 if script.len() == 0 {
7047 if !script::is_bolt2_compliant(&script, their_features) {
7048 return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
7050 Some(script.clone())
7053 // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism (a 0-length script). Peer looks buggy; we fail the channel
7055 return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we didn't receive a script. Use a 0-length script to opt out".to_owned()));
7060 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
7061 match signer_provider.get_shutdown_scriptpubkey() {
7062 Ok(scriptpubkey) => Some(scriptpubkey),
7063 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
7067 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
7068 if !shutdown_scriptpubkey.is_compatible(&their_features) {
7069 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
7073 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
7074 Ok(script) => script,
7075 Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
7078 let mut secp_ctx = Secp256k1::new();
7079 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
7081 let minimum_depth = if is_0conf {
7082 Some(0)
7083 } else {
7084 Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
7088 context: ChannelContext {
7091 config: LegacyChannelConfig {
7092 options: config.channel_config.clone(),
7094 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7099 inbound_handshake_limits_override: None,
7101 temporary_channel_id: Some(msg.temporary_channel_id),
7102 channel_id: msg.temporary_channel_id,
7103 channel_state: ChannelState::NegotiatingFunding(
7104 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7106 announcement_sigs_state: AnnouncementSigsState::NotSent,
7109 latest_monitor_update_id: 0,
7111 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7112 shutdown_scriptpubkey,
7115 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7116 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7117 value_to_self_msat: msg.push_msat,
7119 pending_inbound_htlcs: Vec::new(),
7120 pending_outbound_htlcs: Vec::new(),
7121 holding_cell_htlc_updates: Vec::new(),
7122 pending_update_fee: None,
7123 holding_cell_update_fee: None,
7124 next_holder_htlc_id: 0,
7125 next_counterparty_htlc_id: 0,
7126 update_time_counter: 1,
7128 resend_order: RAACommitmentOrder::CommitmentFirst,
7130 monitor_pending_channel_ready: false,
7131 monitor_pending_revoke_and_ack: false,
7132 monitor_pending_commitment_signed: false,
7133 monitor_pending_forwards: Vec::new(),
7134 monitor_pending_failures: Vec::new(),
7135 monitor_pending_finalized_fulfills: Vec::new(),
7137 signer_pending_commitment_update: false,
7138 signer_pending_funding: false,
7140 #[cfg(debug_assertions)]
7141 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7142 #[cfg(debug_assertions)]
7143 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7145 last_sent_closing_fee: None,
7146 pending_counterparty_closing_signed: None,
7147 expecting_peer_commitment_signed: false,
7148 closing_fee_limits: None,
7149 target_closing_feerate_sats_per_kw: None,
7151 funding_tx_confirmed_in: None,
7152 funding_tx_confirmation_height: 0,
7153 short_channel_id: None,
7154 channel_creation_height: current_chain_height,
7156 feerate_per_kw: msg.feerate_per_kw,
7157 channel_value_satoshis: msg.funding_satoshis,
7158 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7159 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7160 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7161 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7162 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7163 holder_selected_channel_reserve_satoshis,
7164 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7165 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7166 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7167 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7170 counterparty_forwarding_info: None,
7172 channel_transaction_parameters: ChannelTransactionParameters {
7173 holder_pubkeys: pubkeys,
7174 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7175 is_outbound_from_holder: false,
7176 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7177 selected_contest_delay: msg.to_self_delay,
7178 pubkeys: counterparty_pubkeys,
7180 funding_outpoint: None,
7181 channel_type_features: channel_type.clone()
7183 funding_transaction: None,
7184 is_batch_funding: None,
7186 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7187 counterparty_prev_commitment_point: None,
7188 counterparty_node_id,
7190 counterparty_shutdown_scriptpubkey,
7192 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7194 channel_update_status: ChannelUpdateStatus::Enabled,
7195 closing_signed_in_flight: false,
7197 announcement_sigs: None,
7199 #[cfg(any(test, fuzzing))]
7200 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7201 #[cfg(any(test, fuzzing))]
7202 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7204 workaround_lnd_bug_4006: None,
7205 sent_message_awaiting_response: None,
7207 latest_inbound_scid_alias: None,
7208 outbound_scid_alias: 0,
7210 channel_pending_event_emitted: false,
7211 channel_ready_event_emitted: false,
7213 #[cfg(any(test, fuzzing))]
7214 historical_inbound_htlc_fulfills: HashSet::new(),
7219 blocked_monitor_updates: Vec::new(),
7221 unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7227 /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7228 /// should be sent back to the counterparty node.
7230 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7231 pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7232 if self.context.is_outbound() {
7233 panic!("Tried to send accept_channel for an outbound channel?");
7236 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7237 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7239 panic!("Tried to send accept_channel after channel had moved forward");
7241 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7242 panic!("Tried to send an accept_channel for a channel that has already advanced");
7245 self.generate_accept_channel_message()
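// Hedged caller-side sketch (the event plumbing is illustrative, not this
// method's contract): after building an `InboundV1Channel` in response to
// an `open_channel`, a caller would typically do something like:
//   let msg = channel.accept_inbound_channel();
//   pending_msg_events.push(MessageSendEvent::SendAcceptChannel {
//       node_id: counterparty_node_id, msg,
//   });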
7248 /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7249 /// inbound channel. If the intention is to accept an inbound channel, use
7250 /// [`InboundV1Channel::accept_inbound_channel`] instead.
7252 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7253 fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7254 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7255 let keys = self.context.get_holder_pubkeys();
7257 msgs::AcceptChannel {
7258 temporary_channel_id: self.context.channel_id,
7259 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7260 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7261 channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7262 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7263 minimum_depth: self.context.minimum_depth.unwrap(),
7264 to_self_delay: self.context.get_holder_selected_contest_delay(),
7265 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7266 funding_pubkey: keys.funding_pubkey,
7267 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7268 payment_point: keys.payment_point,
7269 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7270 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7271 first_per_commitment_point,
7272 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7273 Some(script) => script.clone().into_inner(),
7274 None => Builder::new().into_script(),
7276 channel_type: Some(self.context.channel_type.clone()),
7278 next_local_nonce: None,
7282 /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7283 /// inbound channel without accepting it.
7285 /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7287 pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7288 self.generate_accept_channel_message()
7291 fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7292 let funding_script = self.context.get_funding_redeemscript();
7294 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7295 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7296 let trusted_tx = initial_commitment_tx.trust();
7297 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7298 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7299 // They sign the holder commitment transaction...
7300 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7301 log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7302 encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7303 encode::serialize_hex(&funding_script), &self.context.channel_id());
7304 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7306 Ok(initial_commitment_tx)
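// Note on the check above: the initial commitment transaction spends the
// (not yet broadcast) funding output, a P2WSH over the 2-of-2 funding
// redeemscript, so the BIP143 sighash commits to both the redeemscript and
// `channel_value_satoshis`. A signature over the wrong amount or script
// therefore fails verification here rather than at broadcast time.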
7309 pub fn funding_created<L: Deref>(
7310 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7311 ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7315 if self.context.is_outbound() {
7316 return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7319 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7320 if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7322 // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7323 // remember the channel, so it's safe to just send an error_message here and drop the channel.
7325 return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7327 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7328 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7329 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7330 panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7333 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7334 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7335 // This is an externally observable change before we finish all our checks. In particular
7336 // check_funding_created_signature may fail.
7337 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7339 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7341 Err(ChannelError::Close(e)) => {
7342 self.context.channel_transaction_parameters.funding_outpoint = None;
7343 return Err((self, ChannelError::Close(e)));
7346 // The only error we know how to handle is ChannelError::Close, so we fall over here
7347 // to make sure we don't continue with an inconsistent state.
7348 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7352 let holder_commitment_tx = HolderCommitmentTransaction::new(
7353 initial_commitment_tx,
7356 &self.context.get_holder_pubkeys().funding_pubkey,
7357 self.context.counterparty_funding_pubkey()
7360 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7361 return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7364 // Now that we're past error-generating stuff, update our local state:
7366 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7367 self.context.channel_id = funding_txo.to_channel_id();
7368 self.context.cur_counterparty_commitment_transaction_number -= 1;
7369 self.context.cur_holder_commitment_transaction_number -= 1;
7371 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7373 let funding_redeemscript = self.context.get_funding_redeemscript();
7374 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7375 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7376 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7377 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7378 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7379 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7380 shutdown_script, self.context.get_holder_selected_contest_delay(),
7381 &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7382 &self.context.channel_transaction_parameters,
7383 funding_redeemscript.clone(), self.context.channel_value_satoshis,
7385 holder_commitment_tx, best_block, self.context.counterparty_node_id);
7386 channel_monitor.provide_initial_counterparty_commitment_tx(
7387 counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7388 self.context.cur_counterparty_commitment_transaction_number + 1,
7389 self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7390 counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7391 counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7393 log_info!(logger, "{} funding_signed for peer for channel {}",
7394 if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7396 // Promote the channel to a full-fledged one now that we have updated the state and have a
7397 // `ChannelMonitor`.
7398 let mut channel = Channel {
7399 context: self.context,
7401 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7402 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7404 Ok((channel, funding_signed, channel_monitor))
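// Hedged sketch of the expected caller flow (roughly what `ChannelManager`
// does when handling `funding_created`; error paths elided):
//   match chan.funding_created(msg, best_block, &signer_provider, &logger) {
//       Ok((channel, funding_signed, monitor)) => {
//           // Persist `monitor` (e.g. hand it to `chain::Watch::watch_channel`)
//           // before any `funding_signed` is released to the peer.
//       },
//       Err((channel, e)) => {
//           // The channel is handed back so the caller can fail it cleanly.
//       },
//   }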
7408 const SERIALIZATION_VERSION: u8 = 3;
7409 const MIN_SERIALIZATION_VERSION: u8 = 3;
7411 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7417 impl Writeable for ChannelUpdateStatus {
7418 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7419 // We only care about writing out the current state as it was announced, i.e. only either
7420 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7421 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7423 ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7424 ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7425 ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7426 ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7432 impl Readable for ChannelUpdateStatus {
7433 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7434 Ok(match <u8 as Readable>::read(reader)? {
7435 0 => ChannelUpdateStatus::Enabled,
7436 1 => ChannelUpdateStatus::Disabled,
7437 _ => return Err(DecodeError::InvalidValue),
7442 impl Writeable for AnnouncementSigsState {
7443 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7444 // We only care about writing out the current state as if we had just disconnected, at
7445 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7447 AnnouncementSigsState::NotSent => 0u8.write(writer),
7448 AnnouncementSigsState::MessageSent => 0u8.write(writer),
7449 AnnouncementSigsState::Committed => 0u8.write(writer),
7450 AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7455 impl Readable for AnnouncementSigsState {
7456 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7457 Ok(match <u8 as Readable>::read(reader)? {
7458 0 => AnnouncementSigsState::NotSent,
7459 1 => AnnouncementSigsState::PeerReceived,
7460 _ => return Err(DecodeError::InvalidValue),
7465 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7466 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7467 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been called.
7470 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7472 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7473 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7474 // the low bytes now and the optional high bytes later.
7475 let user_id_low = self.context.user_id as u64;
7476 user_id_low.write(writer)?;
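// Illustrative sketch of the split-u128 round trip (the high half is
// written as TLV type 25 below and recombined on read):
//   let user_id_low = user_id as u64;            // low 64 bits, written here
//   let user_id_high = (user_id >> 64) as u64;   // high 64 bits, TLV type 25
//   assert_eq!(user_id, user_id_low as u128 | ((user_id_high as u128) << 64));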
7478 // Version 1 deserializers expected to read parts of the config object here. Version 2
7479 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7480 // `minimum_depth` we simply write dummy values here.
7481 writer.write_all(&[0; 8])?;
7483 self.context.channel_id.write(writer)?;
7485 let mut channel_state = self.context.channel_state;
7486 if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7487 channel_state.set_peer_disconnected();
7489 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
7491 channel_state.to_u32().write(writer)?;
7493 self.context.channel_value_satoshis.write(writer)?;
7495 self.context.latest_monitor_update_id.write(writer)?;
7497 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7498 // deserialized from that format.
7499 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7500 Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7501 None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7503 self.context.destination_script.write(writer)?;
7505 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7506 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7507 self.context.value_to_self_msat.write(writer)?;
7509 let mut dropped_inbound_htlcs = 0;
7510 for htlc in self.context.pending_inbound_htlcs.iter() {
7511 if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7512 dropped_inbound_htlcs += 1;
7515 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7516 for htlc in self.context.pending_inbound_htlcs.iter() {
7517 if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7520 htlc.htlc_id.write(writer)?;
7521 htlc.amount_msat.write(writer)?;
7522 htlc.cltv_expiry.write(writer)?;
7523 htlc.payment_hash.write(writer)?;
7525 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7526 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7528 htlc_state.write(writer)?;
7530 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7532 htlc_state.write(writer)?;
7534 &InboundHTLCState::Committed => {
7537 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7539 removal_reason.write(writer)?;
7544 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7545 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7546 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7548 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7549 for htlc in self.context.pending_outbound_htlcs.iter() {
7550 htlc.htlc_id.write(writer)?;
7551 htlc.amount_msat.write(writer)?;
7552 htlc.cltv_expiry.write(writer)?;
7553 htlc.payment_hash.write(writer)?;
7554 htlc.source.write(writer)?;
7556 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7558 onion_packet.write(writer)?;
7560 &OutboundHTLCState::Committed => {
7563 &OutboundHTLCState::RemoteRemoved(_) => {
7564 // Treat this as a Committed because we haven't received the CS - they'll
7565 // re-send the claim/fail on reconnect, at which point we'll (hopefully) receive the missing CS.
7568 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7570 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7571 preimages.push(preimage);
7573 let reason: Option<&HTLCFailReason> = outcome.into();
7574 reason.write(writer)?;
7576 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7578 if let OutboundHTLCOutcome::Success(preimage) = outcome {
7579 preimages.push(preimage);
7581 let reason: Option<&HTLCFailReason> = outcome.into();
7582 reason.write(writer)?;
7585 pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7586 pending_outbound_blinding_points.push(htlc.blinding_point);
7589 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7590 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7591 // Vec of (htlc_id, failure_code, sha256_of_onion)
7592 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7593 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7594 for update in self.context.holding_cell_htlc_updates.iter() {
7596 &HTLCUpdateAwaitingACK::AddHTLC {
7597 ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7598 blinding_point, skimmed_fee_msat,
7601 amount_msat.write(writer)?;
7602 cltv_expiry.write(writer)?;
7603 payment_hash.write(writer)?;
7604 source.write(writer)?;
7605 onion_routing_packet.write(writer)?;
7607 holding_cell_skimmed_fees.push(skimmed_fee_msat);
7608 holding_cell_blinding_points.push(blinding_point);
7610 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7612 payment_preimage.write(writer)?;
7613 htlc_id.write(writer)?;
7615 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7617 htlc_id.write(writer)?;
7618 err_packet.write(writer)?;
7620 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7621 htlc_id, failure_code, sha256_of_onion
7623 // We don't want to break downgrading by adding a new variant, so write a dummy
7624 // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7625 malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7627 let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7629 htlc_id.write(writer)?;
7630 dummy_err_packet.write(writer)?;
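// On read, the TLV entries (type 43) written below are matched back to
// these dummy `FailHTLC`s by `htlc_id` and converted into real
// `FailMalformedHTLC`s again.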
7635 match self.context.resend_order {
7636 RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7637 RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7640 self.context.monitor_pending_channel_ready.write(writer)?;
7641 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7642 self.context.monitor_pending_commitment_signed.write(writer)?;
7644 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7645 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7646 pending_forward.write(writer)?;
7647 htlc_id.write(writer)?;
7650 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7651 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7652 htlc_source.write(writer)?;
7653 payment_hash.write(writer)?;
7654 fail_reason.write(writer)?;
7657 if self.context.is_outbound() {
7658 self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7659 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7660 Some(feerate).write(writer)?;
7662 // As for inbound HTLCs, if the update was only announced and never committed in a
7663 // commitment_signed, drop it.
7664 None::<u32>.write(writer)?;
7666 self.context.holding_cell_update_fee.write(writer)?;
7668 self.context.next_holder_htlc_id.write(writer)?;
7669 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
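// Since the RemoteAnnounced HTLCs dropped above were never committed, the
// counterparty will re-add them with the same IDs on reconnect, so we
// rewind the next-ID counter by the number of HTLCs we dropped.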
7670 self.context.update_time_counter.write(writer)?;
7671 self.context.feerate_per_kw.write(writer)?;
7673 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7674 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7675 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7676 // consider the stale state on reload.
7679 self.context.funding_tx_confirmed_in.write(writer)?;
7680 self.context.funding_tx_confirmation_height.write(writer)?;
7681 self.context.short_channel_id.write(writer)?;
7683 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7684 self.context.holder_dust_limit_satoshis.write(writer)?;
7685 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7687 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7688 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7690 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7691 self.context.holder_htlc_minimum_msat.write(writer)?;
7692 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7694 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7695 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7697 match &self.context.counterparty_forwarding_info {
7700 info.fee_base_msat.write(writer)?;
7701 info.fee_proportional_millionths.write(writer)?;
7702 info.cltv_expiry_delta.write(writer)?;
7704 None => 0u8.write(writer)?
7707 self.context.channel_transaction_parameters.write(writer)?;
7708 self.context.funding_transaction.write(writer)?;
7710 self.context.counterparty_cur_commitment_point.write(writer)?;
7711 self.context.counterparty_prev_commitment_point.write(writer)?;
7712 self.context.counterparty_node_id.write(writer)?;
7714 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7716 self.context.commitment_secrets.write(writer)?;
7718 self.context.channel_update_status.write(writer)?;
7720 #[cfg(any(test, fuzzing))]
7721 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7722 #[cfg(any(test, fuzzing))]
7723 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7724 htlc.write(writer)?;
7727 // If the channel type is something other than only-static-remote-key, then we need to have
7728 // older clients fail to deserialize this channel at all. If the type is
7729 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7731 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7732 Some(&self.context.channel_type) } else { None };
7734 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7735 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7736 // a different percentage of the channel value than 10%, which older versions of LDK used
7737 // to set it to before the percentage was made configurable.
7738 let serialized_holder_selected_reserve =
7739 if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7740 { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7742 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7743 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7744 let serialized_holder_htlc_max_in_flight =
7745 if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7746 { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7748 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7749 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7751 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7752 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7753 // we write the high bytes as an option here.
7754 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7756 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7758 write_tlv_fields!(writer, {
7759 (0, self.context.announcement_sigs, option),
7760 // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7761 // default value instead of being Option<>al. Thus, to maintain compatibility we write
7762 // them twice, once with their original default values above, and once as an option
7763 // here. On the read side, old versions will simply ignore the odd-type entries here,
7764 // and new versions map the default values to None and allow the TLV entries here to override them.
7766 (1, self.context.minimum_depth, option),
7767 (2, chan_type, option),
7768 (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7769 (4, serialized_holder_selected_reserve, option),
7770 (5, self.context.config, required),
7771 (6, serialized_holder_htlc_max_in_flight, option),
7772 (7, self.context.shutdown_scriptpubkey, option),
7773 (8, self.context.blocked_monitor_updates, optional_vec),
7774 (9, self.context.target_closing_feerate_sats_per_kw, option),
7775 (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7776 (13, self.context.channel_creation_height, required),
7777 (15, preimages, required_vec),
7778 (17, self.context.announcement_sigs_state, required),
7779 (19, self.context.latest_inbound_scid_alias, option),
7780 (21, self.context.outbound_scid_alias, required),
7781 (23, channel_ready_event_emitted, option),
7782 (25, user_id_high_opt, option),
7783 (27, self.context.channel_keys_id, required),
7784 (28, holder_max_accepted_htlcs, option),
7785 (29, self.context.temporary_channel_id, option),
7786 (31, channel_pending_event_emitted, option),
7787 (35, pending_outbound_skimmed_fees, optional_vec),
7788 (37, holding_cell_skimmed_fees, optional_vec),
7789 (38, self.context.is_batch_funding, option),
7790 (39, pending_outbound_blinding_points, optional_vec),
7791 (41, holding_cell_blinding_points, optional_vec),
7792 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
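// Per the TLV convention enforced by these macros, readers skip unknown
// odd types but fail on unknown even types. Even types are therefore used
// for fields an older version must not silently drop on downgrade (e.g.
// 38, `is_batch_funding`), while odd types such as 43 degrade gracefully.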
7799 const MAX_ALLOC_SIZE: usize = 64*1024;
7800 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7802 ES::Target: EntropySource,
7803 SP::Target: SignerProvider
7805 fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7806 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7807 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7809 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7810 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7811 // the low bytes now and the high bytes later.
7812 let user_id_low: u64 = Readable::read(reader)?;
7814 let mut config = Some(LegacyChannelConfig::default());
7816 // Read the old serialization of the ChannelConfig from version 0.0.98.
7817 config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7818 config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7819 config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7820 config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7822 // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7823 let mut _val: u64 = Readable::read(reader)?;
7826 let channel_id = Readable::read(reader)?;
7827 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7828 let channel_value_satoshis = Readable::read(reader)?;
7830 let latest_monitor_update_id = Readable::read(reader)?;
7832 let mut keys_data = None;
7834 // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
7835 // the `channel_keys_id` TLV is present below.
7836 let keys_len: u32 = Readable::read(reader)?;
7837 keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7838 while keys_data.as_ref().unwrap().len() != keys_len as usize {
7839 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7840 let mut data = [0; 1024];
7841 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7842 reader.read_exact(read_slice)?;
7843 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
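// Illustrative worst case: a corrupted `keys_len` of u32::MAX costs at most
// MAX_ALLOC_SIZE of up-front capacity, and the chunked `read_exact` above
// hits an I/O error long before gigabytes could be buffered.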
7847 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7848 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7849 Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7852 let destination_script = Readable::read(reader)?;
7854 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7855 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7856 let value_to_self_msat = Readable::read(reader)?;
7858 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7860 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7861 for _ in 0..pending_inbound_htlc_count {
7862 pending_inbound_htlcs.push(InboundHTLCOutput {
7863 htlc_id: Readable::read(reader)?,
7864 amount_msat: Readable::read(reader)?,
7865 cltv_expiry: Readable::read(reader)?,
7866 payment_hash: Readable::read(reader)?,
7867 state: match <u8 as Readable>::read(reader)? {
7868 1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7869 2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7870 3 => InboundHTLCState::Committed,
7871 4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7872 _ => return Err(DecodeError::InvalidValue),
7877 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7878 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7879 for _ in 0..pending_outbound_htlc_count {
7880 pending_outbound_htlcs.push(OutboundHTLCOutput {
7881 htlc_id: Readable::read(reader)?,
7882 amount_msat: Readable::read(reader)?,
7883 cltv_expiry: Readable::read(reader)?,
7884 payment_hash: Readable::read(reader)?,
7885 source: Readable::read(reader)?,
7886 state: match <u8 as Readable>::read(reader)? {
7887 0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7888 1 => OutboundHTLCState::Committed,
7890 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7891 OutboundHTLCState::RemoteRemoved(option.into())
7894 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7895 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7898 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7899 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7901 _ => return Err(DecodeError::InvalidValue),
7903 skimmed_fee_msat: None,
7904 blinding_point: None,
7908 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7909 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7910 for _ in 0..holding_cell_htlc_update_count {
7911 holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7912 0 => HTLCUpdateAwaitingACK::AddHTLC {
7913 amount_msat: Readable::read(reader)?,
7914 cltv_expiry: Readable::read(reader)?,
7915 payment_hash: Readable::read(reader)?,
7916 source: Readable::read(reader)?,
7917 onion_routing_packet: Readable::read(reader)?,
7918 skimmed_fee_msat: None,
7919 blinding_point: None,
7921 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7922 payment_preimage: Readable::read(reader)?,
7923 htlc_id: Readable::read(reader)?,
7925 2 => HTLCUpdateAwaitingACK::FailHTLC {
7926 htlc_id: Readable::read(reader)?,
7927 err_packet: Readable::read(reader)?,
7929 _ => return Err(DecodeError::InvalidValue),
7933 let resend_order = match <u8 as Readable>::read(reader)? {
7934 0 => RAACommitmentOrder::CommitmentFirst,
7935 1 => RAACommitmentOrder::RevokeAndACKFirst,
7936 _ => return Err(DecodeError::InvalidValue),
7939 let monitor_pending_channel_ready = Readable::read(reader)?;
7940 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7941 let monitor_pending_commitment_signed = Readable::read(reader)?;
7943 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7944 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7945 for _ in 0..monitor_pending_forwards_count {
7946 monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7949 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7950 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7951 for _ in 0..monitor_pending_failures_count {
7952 monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7955 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7957 let holding_cell_update_fee = Readable::read(reader)?;
7959 let next_holder_htlc_id = Readable::read(reader)?;
7960 let next_counterparty_htlc_id = Readable::read(reader)?;
7961 let update_time_counter = Readable::read(reader)?;
7962 let feerate_per_kw = Readable::read(reader)?;
7964 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7965 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7966 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7967 // consider the stale state on reload.
7968 match <u8 as Readable>::read(reader)? {
7971 let _: u32 = Readable::read(reader)?;
7972 let _: u64 = Readable::read(reader)?;
7973 let _: Signature = Readable::read(reader)?;
7975 _ => return Err(DecodeError::InvalidValue),
7978 let funding_tx_confirmed_in = Readable::read(reader)?;
7979 let funding_tx_confirmation_height = Readable::read(reader)?;
7980 let short_channel_id = Readable::read(reader)?;
7982 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7983 let holder_dust_limit_satoshis = Readable::read(reader)?;
7984 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7985 let mut counterparty_selected_channel_reserve_satoshis = None;
7987 // Read the old serialization from version 0.0.98.
7988 counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7990 // Read the 8 bytes of backwards-compatibility data.
7991 let _dummy: u64 = Readable::read(reader)?;
7993 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7994 let holder_htlc_minimum_msat = Readable::read(reader)?;
7995 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7997 let mut minimum_depth = None;
7999 // Read the old serialization from version 0.0.98.
8000 minimum_depth = Some(Readable::read(reader)?);
8002 // Read the 4 bytes of backwards-compatibility data.
8003 let _dummy: u32 = Readable::read(reader)?;
8006 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
8008 1 => Some(CounterpartyForwardingInfo {
8009 fee_base_msat: Readable::read(reader)?,
8010 fee_proportional_millionths: Readable::read(reader)?,
8011 cltv_expiry_delta: Readable::read(reader)?,
8013 _ => return Err(DecodeError::InvalidValue),
8016 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8017 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8019 let counterparty_cur_commitment_point = Readable::read(reader)?;
8021 let counterparty_prev_commitment_point = Readable::read(reader)?;
8022 let counterparty_node_id = Readable::read(reader)?;
8024 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8025 let commitment_secrets = Readable::read(reader)?;
8027 let channel_update_status = Readable::read(reader)?;
8029 #[cfg(any(test, fuzzing))]
8030 let mut historical_inbound_htlc_fulfills = HashSet::new();
8031 #[cfg(any(test, fuzzing))]
8033 let htlc_fulfills_len: u64 = Readable::read(reader)?;
8034 for _ in 0..htlc_fulfills_len {
8035 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8039 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8040 Some((feerate, if channel_parameters.is_outbound_from_holder {
8041 FeeUpdateState::Outbound
8043 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8049 let mut announcement_sigs = None;
8050 let mut target_closing_feerate_sats_per_kw = None;
8051 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8052 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8053 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8054 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8055 // only, so we default to that if none was written.
8056 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8057 let mut channel_creation_height = Some(serialized_height);
8058 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8060 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8061 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8062 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8063 let mut latest_inbound_scid_alias = None;
8064 let mut outbound_scid_alias = None;
8065 let mut channel_pending_event_emitted = None;
8066 let mut channel_ready_event_emitted = None;
8068 let mut user_id_high_opt: Option<u64> = None;
8069 let mut channel_keys_id: Option<[u8; 32]> = None;
8070 let mut temporary_channel_id: Option<ChannelId> = None;
8071 let mut holder_max_accepted_htlcs: Option<u16> = None;
8073 let mut blocked_monitor_updates = Some(Vec::new());
8075 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8076 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8078 let mut is_batch_funding: Option<()> = None;
8080 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8081 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8083 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8085 read_tlv_fields!(reader, {
8086 (0, announcement_sigs, option),
8087 (1, minimum_depth, option),
8088 (2, channel_type, option),
8089 (3, counterparty_selected_channel_reserve_satoshis, option),
8090 (4, holder_selected_channel_reserve_satoshis, option),
8091 (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8092 (6, holder_max_htlc_value_in_flight_msat, option),
8093 (7, shutdown_scriptpubkey, option),
8094 (8, blocked_monitor_updates, optional_vec),
8095 (9, target_closing_feerate_sats_per_kw, option),
8096 (11, monitor_pending_finalized_fulfills, optional_vec),
8097 (13, channel_creation_height, option),
8098 (15, preimages_opt, optional_vec),
8099 (17, announcement_sigs_state, option),
8100 (19, latest_inbound_scid_alias, option),
8101 (21, outbound_scid_alias, option),
8102 (23, channel_ready_event_emitted, option),
8103 (25, user_id_high_opt, option),
8104 (27, channel_keys_id, option),
8105 (28, holder_max_accepted_htlcs, option),
8106 (29, temporary_channel_id, option),
8107 (31, channel_pending_event_emitted, option),
8108 (35, pending_outbound_skimmed_fees_opt, optional_vec),
8109 (37, holding_cell_skimmed_fees_opt, optional_vec),
8110 (38, is_batch_funding, option),
8111 (39, pending_outbound_blinding_points_opt, optional_vec),
8112 (41, holding_cell_blinding_points_opt, optional_vec),
8113 (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8116 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8117 let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8118 // If we've gotten to the funding stage of the channel, populate the signer with its
8119 // required channel parameters.
8120 if channel_state >= ChannelState::FundingNegotiated {
8121 holder_signer.provide_channel_parameters(&channel_parameters);
8123 (channel_keys_id, holder_signer)
8125 // `keys_data` can be `None` if we had corrupted data.
8126 let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8127 let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8128 (holder_signer.channel_keys_id(), holder_signer)
8131 if let Some(preimages) = preimages_opt {
8132 let mut iter = preimages.into_iter();
8133 for htlc in pending_outbound_htlcs.iter_mut() {
8135 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8136 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8138 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8139 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8144 // We expect all preimages to be consumed above
8145 if iter.next().is_some() {
8146 return Err(DecodeError::InvalidValue);
8150 let chan_features = channel_type.as_ref().unwrap();
8151 if !chan_features.is_subset(our_supported_features) {
8152 // If the channel was written by a new version and negotiated with features we don't
8153 // understand yet, refuse to read it.
8154 return Err(DecodeError::UnknownRequiredFeature);
8157 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8158 // To account for that, we're proactively setting/overriding the field here.
8159 channel_parameters.channel_type_features = chan_features.clone();
8161 let mut secp_ctx = Secp256k1::new();
8162 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8164 // `user_id` used to be a single u64 value. In order to remain backwards
8165 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8166 // separate u64 values.
8167 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8169 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8171 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8172 let mut iter = skimmed_fees.into_iter();
8173 for htlc in pending_outbound_htlcs.iter_mut() {
8174 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8176 // We expect all skimmed fees to be consumed above
8177 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8179 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8180 let mut iter = skimmed_fees.into_iter();
8181 for htlc in holding_cell_htlc_updates.iter_mut() {
8182 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8183 *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8186 // We expect all skimmed fees to be consumed above
8187 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8189 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8190 let mut iter = blinding_pts.into_iter();
8191 for htlc in pending_outbound_htlcs.iter_mut() {
8192 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8194 // We expect all blinding points to be consumed above
8195 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8197 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8198 let mut iter = blinding_pts.into_iter();
8199 for htlc in holding_cell_htlc_updates.iter_mut() {
8200 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8201 *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8204 // We expect all blinding points to be consumed above
8205 if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8208 if let Some(malformed_htlcs) = malformed_htlcs {
8209 for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8210 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8211 if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8212 let matches = *htlc_id == malformed_htlc_id;
8213 if matches { debug_assert!(err_packet.data.is_empty()) }
8216 }).ok_or(DecodeError::InvalidValue)?;
8217 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8218 htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8220 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8225 context: ChannelContext {
8228 config: config.unwrap(),
8232 // Note that we don't care about serializing handshake limits as we only ever serialize
8233 // channel data after the handshake has completed.
8234 inbound_handshake_limits_override: None,
8237 temporary_channel_id,
8239 announcement_sigs_state: announcement_sigs_state.unwrap(),
8241 channel_value_satoshis,
8243 latest_monitor_update_id,
8245 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8246 shutdown_scriptpubkey,
8249 cur_holder_commitment_transaction_number,
8250 cur_counterparty_commitment_transaction_number,
8253 holder_max_accepted_htlcs,
8254 pending_inbound_htlcs,
8255 pending_outbound_htlcs,
8256 holding_cell_htlc_updates,
8260 monitor_pending_channel_ready,
8261 monitor_pending_revoke_and_ack,
8262 monitor_pending_commitment_signed,
8263 monitor_pending_forwards,
8264 monitor_pending_failures,
8265 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8267 signer_pending_commitment_update: false,
8268 signer_pending_funding: false,
8271 holding_cell_update_fee,
8272 next_holder_htlc_id,
8273 next_counterparty_htlc_id,
8274 update_time_counter,
8277 #[cfg(debug_assertions)]
8278 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8279 #[cfg(debug_assertions)]
8280 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8282 last_sent_closing_fee: None,
8283 pending_counterparty_closing_signed: None,
8284 expecting_peer_commitment_signed: false,
8285 closing_fee_limits: None,
8286 target_closing_feerate_sats_per_kw,
8288 funding_tx_confirmed_in,
8289 funding_tx_confirmation_height,
8291 channel_creation_height: channel_creation_height.unwrap(),
8293 counterparty_dust_limit_satoshis,
8294 holder_dust_limit_satoshis,
8295 counterparty_max_htlc_value_in_flight_msat,
8296 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8297 counterparty_selected_channel_reserve_satoshis,
8298 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8299 counterparty_htlc_minimum_msat,
8300 holder_htlc_minimum_msat,
8301 counterparty_max_accepted_htlcs,
8304 counterparty_forwarding_info,
8306 channel_transaction_parameters: channel_parameters,
8307 funding_transaction,
8310 counterparty_cur_commitment_point,
8311 counterparty_prev_commitment_point,
8312 counterparty_node_id,
8314 counterparty_shutdown_scriptpubkey,
8318 channel_update_status,
8319 closing_signed_in_flight: false,
8323 #[cfg(any(test, fuzzing))]
8324 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8325 #[cfg(any(test, fuzzing))]
8326 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8328 workaround_lnd_bug_4006: None,
8329 sent_message_awaiting_response: None,
8331 latest_inbound_scid_alias,
8332 // Later in the ChannelManager deserialization phase we scan for channels and assign an scid alias if it's missing
8333 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8335 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8336 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8338 #[cfg(any(test, fuzzing))]
8339 historical_inbound_htlc_fulfills,
8341 channel_type: channel_type.unwrap(),
8344 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8353 use bitcoin::blockdata::constants::ChainHash;
8354 use bitcoin::blockdata::script::{ScriptBuf, Builder};
8355 use bitcoin::blockdata::transaction::{Transaction, TxOut};
8356 use bitcoin::blockdata::opcodes;
8357 use bitcoin::network::constants::Network;
8358 use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8359 use crate::ln::{PaymentHash, PaymentPreimage};
8360 use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8361 use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8362 use crate::ln::channel::InitFeatures;
8363 use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8364 use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8365 use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8366 use crate::ln::msgs;
8367 use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8368 use crate::ln::script::ShutdownScript;
8369 use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8370 use crate::chain::BestBlock;
8371 use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8372 use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8373 use crate::chain::transaction::OutPoint;
8374 use crate::routing::router::{Path, RouteHop};
8375 use crate::util::config::UserConfig;
8376 use crate::util::errors::APIError;
8377 use crate::util::ser::{ReadableArgs, Writeable};
8378 use crate::util::test_utils;
8379 use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8380 use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8381 use bitcoin::secp256k1::ffi::Signature as FFISignature;
8382 use bitcoin::secp256k1::{SecretKey,PublicKey};
8383 use bitcoin::hashes::sha256::Hash as Sha256;
8384 use bitcoin::hashes::Hash;
8385 use bitcoin::hashes::hex::FromHex;
8386 use bitcoin::hash_types::WPubkeyHash;
8387 use bitcoin::blockdata::locktime::absolute::LockTime;
8388 use bitcoin::address::{WitnessProgram, WitnessVersion};
8389 use crate::prelude::*;
8392 fn test_channel_state_order() {
8393 use crate::ln::channel::NegotiatingFundingFlags;
8394 use crate::ln::channel::AwaitingChannelReadyFlags;
8395 use crate::ln::channel::ChannelReadyFlags;
8397 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
8398 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
8399 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
8400 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
8403 struct TestFeeEstimator {
8406 impl FeeEstimator for TestFeeEstimator {
8407 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8413 fn test_max_funding_satoshis_no_wumbo() {
8414 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8415 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8416 "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8420 signer: InMemorySigner,
8423 impl EntropySource for Keys {
8424 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8427 impl SignerProvider for Keys {
8428 type EcdsaSigner = InMemorySigner;
8430 type TaprootSigner = InMemorySigner;
8432 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8433 self.signer.channel_keys_id()
8436 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8440 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8442 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8443 let secp_ctx = Secp256k1::signing_only();
8444 let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8445 let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8446 Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8449 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8450 let secp_ctx = Secp256k1::signing_only();
8451 let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8452 Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8456 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8457 fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8458 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8462 fn upfront_shutdown_script_incompatibility() {
8463 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8464 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8465 &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8468 let seed = [42; 32];
8469 let network = Network::Testnet;
8470 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8471 keys_provider.expect(OnGetShutdownScriptpubkey {
8472 returns: non_v0_segwit_shutdown_script.clone(),
8475 let secp_ctx = Secp256k1::new();
8476 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8477 let config = UserConfig::default();
8478 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8479 Err(APIError::IncompatibleShutdownScript { script }) => {
assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
},
8482 Err(e) => panic!("Unexpected error: {:?}", e),
Ok(_) => panic!("Expected error"),
}
8487 // Check that, during channel creation, we use the same feerate in the open channel message
8488 // as we do in the Channel object creation itself.
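// (The channel samples and caches the feerate once in `OutboundV1Channel::new`;
// `get_open_channel` reports the cached value rather than re-querying the estimator,
// which is what mutating the estimator below demonstrates.)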
8490 fn test_open_channel_msg_fee() {
8491 let original_fee = 253;
8492 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8493 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8494 let secp_ctx = Secp256k1::new();
8495 let seed = [42; 32];
8496 let network = Network::Testnet;
8497 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8499 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8500 let config = UserConfig::default();
8501 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8503 // Now change the fee so we can check that the fee in the open_channel message is the
8504 // same as the old fee.
8505 fee_est.fee_est = 500;
8506 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8507 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8511 fn test_holder_vs_counterparty_dust_limit() {
8512 // Test that when calculating the local and remote commitment transaction fees, the correct
8513 // dust limits are used.
8514 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8515 let secp_ctx = Secp256k1::new();
8516 let seed = [42; 32];
8517 let network = Network::Testnet;
8518 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8519 let logger = test_utils::TestLogger::new();
8520 let best_block = BestBlock::from_network(network);
8522 // Go through the flow of opening a channel between two nodes, making sure
8523 // they have different dust limits.
8525 // Create Node A's channel pointing to Node B's pubkey
8526 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8527 let config = UserConfig::default();
8528 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8530 // Create Node B's channel by receiving Node A's open_channel message
8531 // Make sure A's dust limit is as we expect.
8532 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8533 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8534 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8536 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8537 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8538 accept_channel_msg.dust_limit_satoshis = 546;
8539 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8540 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8542 // Node A --> Node B: funding created
8543 let output_script = node_a_chan.context.get_funding_redeemscript();
8544 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
8547 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8548 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8549 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8551 // Node B --> Node A: funding signed
8552 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8553 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8555 // Put some inbound and outbound HTLCs in A's channel.
8556 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
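// Why 11_092_000 msat works, assuming the pre-anchors BOLT 3 HTLC claim-tx weights
// (timeout 663 WU, success 703 WU) at the 15000 sat/kW feerate above:
//   A (dust limit 1560 sat): 1560 + 15000*663/1000 = 11505 sat (offered)
//                            1560 + 15000*703/1000 = 12105 sat (received)
//   B (dust limit 546 sat):   546 + 15000*663/1000 = 10491 sat (offered)
//                             546 + 15000*703/1000 = 11091 sat (received)
// 11092 sat sits above both of B's effective dust limits but below both of A's.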
node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
htlc_id: 0,
amount_msat: htlc_amount_msat,
payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
cltv_expiry: 300000000,
state: InboundHTLCState::Committed,
});
node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
htlc_id: 1,
amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
cltv_expiry: 200000000,
state: OutboundHTLCState::Committed,
source: HTLCSource::OutboundRoute {
path: Path { hops: Vec::new(), blinded_tail: None },
session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
first_hop_htlc_msat: 548,
payment_id: PaymentId([42; 32]),
},
skimmed_fee_msat: None,
blinding_point: None,
});
8581 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8582 // the dust limit check.
8583 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8584 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8585 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8586 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8588 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8589 // of the HTLCs are seen to be above the dust limit.
8590 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8591 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8592 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8593 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8594 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8598 fn test_timeout_vs_success_htlc_dust_limit() {
8599 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8600 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8601 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8602 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
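// For intuition, assuming the pre-anchors weights (timeout 663 WU, success 703 WU), the
// two claim transactions cost different amounts at the 253 sat/kW floor used here:
//   timeout fee: 253 * 663 / 1000 = 167 sat
//   success fee: 253 * 703 / 1000 = 177 sat
// Swapping the weights would therefore move the effective dust threshold by ~10 sat,
// which the +/- 1 sat probes below are tight enough to catch.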
8603 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8604 let secp_ctx = Secp256k1::new();
8605 let seed = [42; 32];
8606 let network = Network::Testnet;
8607 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8609 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8610 let config = UserConfig::default();
8611 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8613 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8614 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8616 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8617 // counted as dust when it shouldn't be.
8618 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8619 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8620 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8621 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8623 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8624 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8625 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8626 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8627 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8629 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8631 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8632 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8633 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8634 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8635 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8637 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8638 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8639 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8640 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8641 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8645 fn channel_reestablish_no_updates() {
8646 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8647 let logger = test_utils::TestLogger::new();
8648 let secp_ctx = Secp256k1::new();
8649 let seed = [42; 32];
8650 let network = Network::Testnet;
8651 let best_block = BestBlock::from_network(network);
8652 let chain_hash = ChainHash::using_genesis_block(network);
8653 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8655 // Go through the flow of opening a channel between two nodes.
8657 // Create Node A's channel pointing to Node B's pubkey
8658 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8659 let config = UserConfig::default();
8660 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8662 // Create Node B's channel by receiving Node A's open_channel message
8663 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8664 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8665 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8667 // Node B --> Node A: accept channel
8668 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8669 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8671 // Node A --> Node B: funding created
8672 let output_script = node_a_chan.context.get_funding_redeemscript();
8673 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
8676 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8677 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8678 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8680 // Node B --> Node A: funding signed
8681 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8682 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8684 // Now disconnect the two nodes and check that the commitment point in
8685 // Node B's channel_reestablish message is sane.
8686 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8687 let msg = node_b_chan.get_channel_reestablish(&&logger);
8688 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8689 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8690 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
// Check that the commitment point in Node A's channel_reestablish message
// is sane.
8694 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8695 let msg = node_a_chan.get_channel_reestablish(&&logger);
8696 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8697 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8698 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
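// (Right after funding, only the initial commitment transaction has been signed and no
// revocations have been exchanged, so both sides expect commitment number 1 next, have
// no revocation to report, and send an all-zero `your_last_per_commitment_secret`.)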
8702 fn test_configured_holder_max_htlc_value_in_flight() {
8703 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8704 let logger = test_utils::TestLogger::new();
8705 let secp_ctx = Secp256k1::new();
8706 let seed = [42; 32];
8707 let network = Network::Testnet;
8708 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8709 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8710 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8712 let mut config_2_percent = UserConfig::default();
8713 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8714 let mut config_99_percent = UserConfig::default();
8715 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8716 let mut config_0_percent = UserConfig::default();
8717 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8718 let mut config_101_percent = UserConfig::default();
8719 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8721 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8722 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8723 // which is set to the lower bound + 1 (2%) of the `channel_value`.
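// A minimal sketch of the clamping rule the assertions below exercise (a hypothetical
// helper for illustration, not the in-tree expression):
let _expected_in_flight_msat = |channel_value_msat: u64, configured_percent: u64| -> u64 {
channel_value_msat * configured_percent.clamp(1, 100) / 100
};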
8724 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8725 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8726 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8728 // Test with the upper bound - 1 of valid values (99%).
8729 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8730 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8731 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8733 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8735 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8736 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
// which is set to the lower bound + 1 (2%) of the `channel_value`.
8738 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8739 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8740 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8742 // Test with the upper bound - 1 of valid values (99%).
8743 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8744 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8745 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8747 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8748 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8749 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8750 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8751 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8753 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8756 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8757 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8758 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8760 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8761 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8762 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8763 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8764 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8766 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
// (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
// than 100.
8769 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8770 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8771 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8775 fn test_configured_holder_selected_channel_reserve_satoshis() {
8777 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8778 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8779 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8781 // Test with valid but unreasonably high channel reserves
// Requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves
8783 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8784 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
// Test with a calculated channel reserve below the lower bound,
// i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
8788 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
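// (100_000 sat * 0.002% rounds down to a mere 2 sat, so the helper is expected to clamp
// the outbound holder's reserve up to the `MIN_THEIR_CHAN_RESERVE_SATOSHIS` floor.)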
// Test with invalid channel reserves since the sum of both is greater than or equal
// to 100%.
8792 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8793 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8796 fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8797 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8798 let logger = test_utils::TestLogger::new();
8799 let secp_ctx = Secp256k1::new();
8800 let seed = [42; 32];
8801 let network = Network::Testnet;
8802 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8803 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8804 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8807 let mut outbound_node_config = UserConfig::default();
8808 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8809 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8811 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8812 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8814 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8815 let mut inbound_node_config = UserConfig::default();
8816 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8818 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8819 let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8821 let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8823 assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8824 assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
} else {
// Channel negotiations failed
8827 let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
assert!(result.is_err());
}
8833 fn channel_update() {
8834 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8835 let logger = test_utils::TestLogger::new();
8836 let secp_ctx = Secp256k1::new();
8837 let seed = [42; 32];
8838 let network = Network::Testnet;
8839 let best_block = BestBlock::from_network(network);
8840 let chain_hash = ChainHash::using_genesis_block(network);
8841 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8843 // Create Node A's channel pointing to Node B's pubkey
8844 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8845 let config = UserConfig::default();
8846 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8848 // Create Node B's channel by receiving Node A's open_channel message
8849 // Make sure A's dust limit is as we expect.
8850 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8851 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8852 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8854 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8855 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8856 accept_channel_msg.dust_limit_satoshis = 546;
8857 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8858 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8860 // Node A --> Node B: funding created
8861 let output_script = node_a_chan.context.get_funding_redeemscript();
8862 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: output_script.clone(),
}]};
8865 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8866 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8867 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8869 // Node B --> Node A: funding signed
8870 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8871 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8873 // Make sure that receiving a channel update will update the Channel as expected.
let update = ChannelUpdate {
contents: UnsignedChannelUpdate {
chain_hash,
short_channel_id: 0,
timestamp: 0,
flags: 0,
cltv_expiry_delta: 100,
htlc_minimum_msat: 5,
htlc_maximum_msat: MAX_VALUE_MSAT,
fee_base_msat: 110,
fee_proportional_millionths: 11,
excess_data: Vec::new(),
},
signature: Signature::from(unsafe { FFISignature::new() })
};
8889 assert!(node_a_chan.channel_update(&update).unwrap());
8891 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8892 // change our official htlc_minimum_msat.
8893 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
match node_a_chan.context.counterparty_forwarding_info() {
Some(info) => {
assert_eq!(info.cltv_expiry_delta, 100);
assert_eq!(info.fee_base_msat, 110);
assert_eq!(info.fee_proportional_millionths, 11);
},
None => panic!("expected counterparty forwarding info to be Some")
}
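// Applying the exact same update again changes nothing, which `channel_update` signals
// by returning `false`.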
8903 assert!(!node_a_chan.channel_update(&update).unwrap());
8907 fn blinding_point_skimmed_fee_malformed_ser() {
// Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
// properly.
8910 let logger = test_utils::TestLogger::new();
8911 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8912 let secp_ctx = Secp256k1::new();
8913 let seed = [42; 32];
8914 let network = Network::Testnet;
8915 let best_block = BestBlock::from_network(network);
8916 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8918 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8919 let config = UserConfig::default();
8920 let features = channelmanager::provided_init_features(&config);
8921 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
).unwrap();
8924 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
8925 &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
&features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
).unwrap();
8928 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
8929 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
}]};
8932 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8933 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
8934 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
8935 Ok((chan, _, _)) => chan,
Err((_, e)) => panic!("{}", e),
};
let dummy_htlc_source = HTLCSource::OutboundRoute {
path: Path {
hops: vec![RouteHop {
pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
cltv_expiry_delta: 0, maybe_announced_channel: false,
}],
blinded_tail: None,
},
session_priv: test_utils::privkey(42),
first_hop_htlc_msat: 0,
payment_id: PaymentId([42; 32]),
};
let dummy_outbound_output = OutboundHTLCOutput {
htlc_id: 0,
amount_msat: 0,
payment_hash: PaymentHash([43; 32]),
cltv_expiry: 0,
state: OutboundHTLCState::Committed,
source: dummy_htlc_source.clone(),
skimmed_fee_msat: None,
blinding_point: None,
};
8962 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
// Alternate which HTLCs carry blinding points and skimmed fees so both the set and
// unset variants round-trip through serialization.
for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
if idx % 2 == 0 {
htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
}
if idx % 3 == 0 {
htlc.skimmed_fee_msat = Some(1);
}
}
8971 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
amount_msat: 0,
cltv_expiry: 0,
payment_hash: PaymentHash([43; 32]),
source: dummy_htlc_source.clone(),
onion_routing_packet: msgs::OnionPacket {
version: 0,
public_key: Ok(test_utils::pubkey(1)),
hop_data: [0; 20*65],
hmac: [0; 32],
},
skimmed_fee_msat: None,
blinding_point: None,
};
let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
payment_preimage: PaymentPreimage([42; 32]),
htlc_id: 0,
};
8991 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
};
8994 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
};
let mut holding_cell_htlc_updates = Vec::with_capacity(12);
for i in 0..12 {
if i % 5 == 0 {
holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
9001 } else if i % 5 == 1 {
9002 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
9003 } else if i % 5 == 2 {
9004 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
9005 if let HTLCUpdateAwaitingACK::AddHTLC {
9006 ref mut blinding_point, ref mut skimmed_fee_msat, ..
9007 } = &mut dummy_add {
9008 *blinding_point = Some(test_utils::pubkey(42 + i));
*skimmed_fee_msat = Some(42);
}
holding_cell_htlc_updates.push(dummy_add);
9012 } else if i % 5 == 3 {
9013 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
} else {
holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
}
}
9018 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
9020 // Encode and decode the channel and ensure that the HTLCs within are the same.
9021 let encoded_chan = chan.encode();
9022 let mut s = crate::io::Cursor::new(&encoded_chan);
9023 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
9024 let features = channelmanager::provided_channel_type_features(&config);
9025 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9026 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9027 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9030 #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9032 fn outbound_commitment_test() {
9033 use bitcoin::sighash;
9034 use bitcoin::consensus::encode::serialize;
9035 use bitcoin::sighash::EcdsaSighashType;
9036 use bitcoin::hashes::hex::FromHex;
9037 use bitcoin::hash_types::Txid;
9038 use bitcoin::secp256k1::Message;
9039 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9040 use crate::ln::PaymentPreimage;
9041 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
9042 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9043 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9044 use crate::util::logger::Logger;
9045 use crate::sync::Arc;
9046 use core::str::FromStr;
9047 use hex::DisplayHex;
9049 // Test vectors from BOLT 3 Appendices C and F (anchors):
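// Each `test_commitment!`/`test_commitment_with_anchors!` invocation below validates,
// against the fixed vectors: the counterparty's commitment signature, our own holder
// signature, the fully signed commitment transaction bytes, and, for every HTLC, both
// signatures plus the signed HTLC claim transaction.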
9050 let feeest = TestFeeEstimator{fee_est: 15000};
9051 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9052 let secp_ctx = Secp256k1::new();
let mut signer = InMemorySigner::new(
&secp_ctx,
SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
// These aren't set in the test vectors:
[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10_000_000,
[0; 32],
[0; 32],
);
9069 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9070 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9071 let keys_provider = Keys { signer: signer.clone() };
9073 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9074 let mut config = UserConfig::default();
9075 config.channel_handshake_config.announced_channel = false;
9076 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9077 chan.context.holder_dust_limit_satoshis = 546;
9078 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
9080 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9082 let counterparty_pubkeys = ChannelPublicKeys {
9083 funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9084 revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9085 payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9086 delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
};
9089 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9090 CounterpartyChannelTransactionParameters {
9091 pubkeys: counterparty_pubkeys.clone(),
selected_contest_delay: 144
});
9094 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9095 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9097 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9098 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9100 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9101 <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9103 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9104 <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9106 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9107 // derived from a commitment_seed, so instead we copy it here and call
9108 // build_commitment_transaction.
9109 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9110 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9111 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9112 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9113 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
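// (Per BOLT 3, the non-revocation keys here are each derived from a basepoint as
//   pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G,
// while the revocation key blinds the counterparty's revocation basepoint with the
// per-commitment point.)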
9115 macro_rules! test_commitment {
9116 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9117 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
};
}
9122 macro_rules! test_commitment_with_anchors {
9123 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9124 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
};
}
9129 macro_rules! test_commitment_common {
9130 ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
9133 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9134 let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9136 let htlcs = commitment_stats.htlcs_included.drain(..)
.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
.collect();
(commitment_stats.tx, htlcs)
};
9141 let trusted_tx = commitment_tx.trust();
9142 let unsigned_tx = trusted_tx.built_transaction();
9143 let redeemscript = chan.context.get_funding_redeemscript();
9144 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9145 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9146 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9147 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9149 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9150 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9151 let mut counterparty_htlc_sigs = Vec::new();
counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
$({
let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
counterparty_htlc_sigs.push(remote_signature);
})*
9158 assert_eq!(htlcs.len(), per_htlc.len());
9160 let holder_commitment_tx = HolderCommitmentTransaction::new(
9161 commitment_tx.clone(),
9162 counterparty_signature,
9163 counterparty_htlc_sigs,
9164 &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
chan.context.counterparty_funding_pubkey()
);
9167 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9168 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9170 let funding_redeemscript = chan.context.get_funding_redeemscript();
9171 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9172 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9174 // ((htlc, counterparty_sig), (index, holder_sig))
9175 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
$({
log_trace!(logger, "verifying htlc {}", $htlc_idx);
9179 let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9181 let ref htlc = htlcs[$htlc_idx];
9182 let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9183 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9184 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9185 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9186 let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9187 let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9188 assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
let mut preimage: Option<PaymentPreimage> = None;
if !htlc.offered {
for i in 0..5 {
let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
if out == htlc.payment_hash {
preimage = Some(PaymentPreimage([i; 32]));
}
}
assert!(preimage.is_some());
}
9202 let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9203 let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9204 channel_derivation_parameters: ChannelDerivationParameters {
9205 value_satoshis: chan.context.channel_value_satoshis,
9206 keys_id: chan.context.channel_keys_id,
transaction_parameters: chan.context.channel_transaction_parameters.clone(),
},
9209 commitment_txid: trusted_tx.txid(),
9210 per_commitment_number: trusted_tx.commitment_number(),
9211 per_commitment_point: trusted_tx.per_commitment_point(),
feerate_per_kw: trusted_tx.feerate_per_kw(),
htlc: htlc.clone(),
9214 preimage: preimage.clone(),
9215 counterparty_sig: *htlc_counterparty_sig,
9216 }, &secp_ctx).unwrap();
9217 let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9218 assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9220 let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9221 assert_eq!(signature, htlc_holder_sig, "htlc sig");
9222 let trusted_tx = holder_commitment_tx.trust();
9223 htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9224 log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9225 assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
})*
assert!(htlc_counterparty_sig_iter.next().is_none());
} }
}
9231 // anchors: simple commitment tx with no HTLCs and single anchor
9232 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9233 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9234 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9236 // simple commitment tx with no HTLCs
9237 chan.context.value_to_self_msat = 7000000000;
9239 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9240 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9241 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9243 // anchors: simple commitment tx with no HTLCs
9244 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9245 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9246 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9248 chan.context.pending_inbound_htlcs.push({
9249 let mut out = InboundHTLCOutput{
9251 amount_msat: 1000000,
9253 payment_hash: PaymentHash([0; 32]),
9254 state: InboundHTLCState::Committed,
9256 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9259 chan.context.pending_inbound_htlcs.push({
9260 let mut out = InboundHTLCOutput{
9262 amount_msat: 2000000,
9264 payment_hash: PaymentHash([0; 32]),
9265 state: InboundHTLCState::Committed,
9267 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9270 chan.context.pending_outbound_htlcs.push({
9271 let mut out = OutboundHTLCOutput{
9273 amount_msat: 2000000,
9275 payment_hash: PaymentHash([0; 32]),
9276 state: OutboundHTLCState::Committed,
9277 source: HTLCSource::dummy(),
9278 skimmed_fee_msat: None,
9279 blinding_point: None,
9281 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9284 chan.context.pending_outbound_htlcs.push({
9285 let mut out = OutboundHTLCOutput{
9287 amount_msat: 3000000,
9289 payment_hash: PaymentHash([0; 32]),
9290 state: OutboundHTLCState::Committed,
9291 source: HTLCSource::dummy(),
9292 skimmed_fee_msat: None,
9293 blinding_point: None,
9295 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9298 chan.context.pending_inbound_htlcs.push({
9299 let mut out = InboundHTLCOutput{
9301 amount_msat: 4000000,
9303 payment_hash: PaymentHash([0; 32]),
9304 state: InboundHTLCState::Committed,
9306 out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9310 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9311 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9312 chan.context.feerate_per_kw = 0;
9314 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9315 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9316 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9319 "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9320 "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9321 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9324 "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9325 "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9326 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9329 "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9330 "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9331 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9334 "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9335 "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9336 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9339 "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9340 "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9341 "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9344 // commitment tx with seven outputs untrimmed (maximum feerate)
9345 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9346 chan.context.feerate_per_kw = 647;
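// 647 sat/kW is the highest feerate at which all five HTLCs remain untrimmed:
// the smallest (the 1000 sat received HTLC) nets
// 1000 - floor(703 * 647 / 1000) = 546 sat, exactly the 546 sat dust limit.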
test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
"30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
"30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
"304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
"30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
"30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
"3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
"304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
"3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
"3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
"020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with six outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 648;
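// The jump from seven to six outputs between feerates 647 and 648 follows
// BOLT 3's trimming rule: a received HTLC is dropped from the commitment when
// its value minus the HTLC-success transaction fee at the current feerate
// falls below the holder's dust limit. A minimal sketch of that check
// (hypothetical helper with assumed names, not used by the macros here;
// non-anchor HTLC-success weight of 703 per BOLT 3):
fn is_received_htlc_trimmed(amount_sat: u64, feerate_per_kw: u64, dust_limit_sat: u64) -> bool {
	const HTLC_SUCCESS_TX_WEIGHT: u64 = 703;
	let htlc_fee_sat = HTLC_SUCCESS_TX_WEIGHT * feerate_per_kw / 1000;
	amount_sat.saturating_sub(htlc_fee_sat) < dust_limit_sat
}
// The 1000 sat received HTLC nets exactly the dust limit at 647
// (1000 - 454 = 546) but falls below it at 648 (1000 - 455 = 545).
debug_assert!(!is_received_htlc_trimmed(1000, 647, 546));
debug_assert!(is_received_htlc_trimmed(1000, 648, 546));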
test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
"3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
"304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
"304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
"3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
"304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
"3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
"3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
"020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with six outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 645;
chan.context.holder_dust_limit_satoshis = 1001;
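// Under `anchors_zero_htlc_fee_and_dependencies` the second-stage HTLC
// transactions carry no fee, so trimming reduces to comparing the HTLC value
// against the dust limit directly: the 1001 sat dust limit trims the 1000 sat
// HTLC even at this near-minimum feerate. Note the two 330 sat anchor
// outputs, the 1-block CSV on the HTLC transaction inputs, and that the
// counterparty's HTLC signatures commit with
// SIGHASH_SINGLE|SIGHASH_ANYONECANPAY (the 0x83 byte in the witnesses below).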
test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
"3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
"3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
"3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
"3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
"3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
"304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
"3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
"3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
"02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with six outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2069;
chan.context.holder_dust_limit_satoshis = 546;
test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
"3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
"3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
"3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
"3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
"3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
"3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
"3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
"02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2070;
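// One step past the 2069 sat/kW case above, the 2000 sat received HTLC is
// trimmed: 2000 - floor(703 * 2070 / 1000) = 545 < 546.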
test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
"3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
"30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
"3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
"30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
"30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
"02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with five outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2194;
test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
"3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
"304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
"3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
"3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
"30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
"02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with four outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2195;
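// Offered HTLCs are judged against the lighter HTLC-timeout transaction
// (663 weight), so the 2000 sat offered HTLC survives until 2195 sat/kW:
// 2000 - floor(663 * 2195 / 1000) = 545 < 546 (it netted exactly 546 at 2194).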
test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
"3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
"3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
"3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
"020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with four outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 2185;
chan.context.holder_dust_limit_satoshis = 2001;
let cached_channel_type = chan.context.channel_type.clone();
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
"3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
"30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
"3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
"3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
"02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with four outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3702;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
"3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
"304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
"3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
"304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
"020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with three outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3703;
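// 3703 sat/kW trims the 3000 sat offered HTLC:
// 3000 - floor(663 * 3703 / 1000) = 545 < 546.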
test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
"304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
"3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
"0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// anchors: commitment tx with three outputs untrimmed (minimum dust limit)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 3687;
chan.context.holder_dust_limit_satoshis = 3001;
chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
"3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
"3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
"02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
// commitment tx with three outputs untrimmed (maximum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4914;
chan.context.holder_dust_limit_satoshis = 546;
chan.context.channel_type = cached_channel_type.clone();
test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
"3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
"3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
"30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
"02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
// commitment tx with two outputs untrimmed (minimum feerate)
chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
chan.context.feerate_per_kw = 4915;
chan.context.holder_dust_limit_satoshis = 546;
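// 4915 sat/kW trims the last remaining (4000 sat, received) HTLC:
// 4000 - floor(703 * 4915 / 1000) = 545 < 546, leaving only the to_local and
// to_remote outputs.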
9628 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9629 "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9630 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// anchors: commitment tx with two outputs untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 4894;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
		"30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with two outputs untrimmed (maximum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651180;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type.clone();

		test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
		"3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with one output untrimmed (minimum feerate)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651181;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// anchors: commitment tx with one output untrimmed (minimum dust limit)
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 6216010;
		chan.context.holder_dust_limit_satoshis = 4001;
		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();

		test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
		"30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with fee greater than funder amount
		chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
		chan.context.feerate_per_kw = 9651936;
		chan.context.holder_dust_limit_satoshis = 546;
		chan.context.channel_type = cached_channel_type;

		test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
		"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
		// commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
		chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
		chan.context.feerate_per_kw = 253;
		chan.context.pending_inbound_htlcs.clear();
		chan.context.pending_inbound_htlcs.push({
			let mut out = InboundHTLCOutput{
				htlc_id: 1,
				amount_msat: 2000000,
				cltv_expiry: 501,
				payment_hash: PaymentHash([0; 32]),
				state: InboundHTLCState::Committed,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.clear();
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 6,
				amount_msat: 5000001,
				cltv_expiry: 506,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
		chan.context.pending_outbound_htlcs.push({
			let mut out = OutboundHTLCOutput{
				htlc_id: 5,
				amount_msat: 5000000,
				cltv_expiry: 505,
				payment_hash: PaymentHash([0; 32]),
				state: OutboundHTLCState::Committed,
				source: HTLCSource::dummy(),
				skimmed_fee_msat: None,
				blinding_point: None,
			};
			out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
			out
		});
9725 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9726 "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9727 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9730 "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9731 "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9732 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9734 "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9735 "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9736 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9738 "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9739 "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9740 "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }

		chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
		test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
		"3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
		"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

		{ 0,
		"30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
		"304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },

		{ 1,
		"304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
		"304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },

		{ 2,
		"3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
		"3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
		"020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
		} );
	}
	#[test]
	fn test_per_commitment_secret_gen() {
		// Test vectors from BOLT 3 Appendix D:

		let mut seed = [0; 32];
		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
			<Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
			<Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);

		assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
			<Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);

		seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
		assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
			<Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
	}
	#[test]
	fn test_key_derivation() {
		// Test vectors from BOLT 3 Appendix E:
		let secp_ctx = Secp256k1::new();

		let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
		let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();

		let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
		assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);

		let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
		assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());

		assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
			<Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);

		assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
			SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
	}
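
	// For reference: BOLT 3 (Appendix E) derives the non-revocation private keys checked
	// above as `privkey = base_secret + SHA256(per_commitment_point || basepoint)` mod n.
	// A minimal sketch mirroring `chan_utils::derive_private_key`; the helper name is
	// ours and it is unused by the tests:
	#[allow(dead_code)]
	fn derive_private_key_sketch(
		secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, per_commitment_point: &PublicKey,
		base_secret: &SecretKey,
	) -> SecretKey {
		let basepoint = PublicKey::from_secret_key(secp_ctx, base_secret);
		// tweak = SHA256(per_commitment_point || basepoint), both in compressed form.
		let mut buf = Vec::with_capacity(33 * 2);
		buf.extend_from_slice(&per_commitment_point.serialize());
		buf.extend_from_slice(&basepoint.serialize());
		let tweak = Sha256::hash(&buf).to_byte_array();
		base_secret.clone()
			.add_tweak(&bitcoin::secp256k1::Scalar::from_be_bytes(tweak).expect("hash is a valid scalar"))
			.expect("tweaked key is valid")
	}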
	#[test]
	fn test_zero_conf_channel_type_support() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
		let logger = test_utils::TestLogger::new();

		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let config = UserConfig::default();
		let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

		let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
		channel_type_features.set_zero_conf_required();

		let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(channel_type_features);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
			node_b_node_id, &channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
		assert!(res.is_ok());
	}
	#[test]
	fn test_supports_anchors_zero_htlc_tx_fee() {
		// Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
		// resulting `channel_type`.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let mut config = UserConfig::default();
		config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

		// It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
		// both sides need to signal it.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
			&config, 0, 42, None
		).unwrap();
		assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());

		let mut expected_channel_type = ChannelTypeFeatures::empty();
		expected_channel_type.set_static_remote_key_required();
		expected_channel_type.set_anchors_zero_fee_htlc_tx_required();

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		assert_eq!(channel_a.context.channel_type, expected_channel_type);
		assert_eq!(channel_b.context.channel_type, expected_channel_type);
	}
	#[test]
	fn test_rejects_implicit_simple_anchors() {
		// Tests that if `option_anchors` is being negotiated implicitly through the intersection of
		// each side's `InitFeatures`, it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let raw_init_features = static_remote_key_required | simple_anchors_required;
		let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
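
		// Sanity check (illustrative, ours): `to_le_bytes` places feature bit 12
		// (`option_static_remotekey`, required) in byte 1 and bit 20 (`option_anchors`,
		// required) in byte 2, each as the 0x10 bit.
		assert_eq!(raw_init_features.to_le_bytes()[1], 0x10);
		assert_eq!(raw_init_features.to_le_bytes()[2], 0x10);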

		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		// Set `channel_type` to `None` to force the implicit feature negotiation.
		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = None;

		// Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
		// `static_remote_key`, B will fail the channel.
		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(channel_b.is_err());
	}
	#[test]
	fn test_rejects_simple_anchors_channel_type() {
		// Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
		// it is rejected.
		let secp_ctx = Secp256k1::new();
		let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let network = Network::Testnet;
		let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
		let logger = test_utils::TestLogger::new();

		let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
		let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());

		let config = UserConfig::default();

		// See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
		let static_remote_key_required: u64 = 1 << 12;
		let simple_anchors_required: u64 = 1 << 20;
		let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
		let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
		assert!(!simple_anchors_init.requires_unknown_bits());
		assert!(!simple_anchors_channel_type.requires_unknown_bits());

		// First, we'll try to open a channel between A and B where A requests a channel type for
		// the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
		// B as it's not supported by LDK.
		let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
			&channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
			None
		).unwrap();

		let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
		open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		);
		assert!(res.is_err());

		// Then, we'll try to open another channel where A requests a channel type for
		// `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
		// original `option_anchors` feature, which should be rejected by A as it's not supported by
		// LDK.
		let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
			10000000, 100000, 42, &config, 0, 42, None
		).unwrap();

		let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));

		let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
			&fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
			&channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
			&open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
		).unwrap();

		let mut accept_channel_msg = channel_b.get_accept_channel_message();
		accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());

		let res = channel_a.accept_channel(
			&accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
		);
		assert!(res.is_err());
	}
	#[test]
	fn test_waiting_for_batch() {
		let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
		let logger = test_utils::TestLogger::new();
		let secp_ctx = Secp256k1::new();
		let seed = [42; 32];
		let network = Network::Testnet;
		let best_block = BestBlock::from_network(network);
		let chain_hash = ChainHash::using_genesis_block(network);
		let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

		let mut config = UserConfig::default();
		// Set trust_own_funding_0conf so we can check that we don't send channel_ready for a
		// channel in a batch before all channels in the batch are ready.
		config.channel_handshake_limits.trust_own_funding_0conf = true;

		// Create a channel from node a to node b that will be part of batch funding.
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
		let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_init_features(&config),
			10000000,
			100000,
			42,
			&config,
			0,
			42,
			None
		).unwrap();

		let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![TxOut {
				value: 10000000, script_pubkey: output_script.clone(),
			}, TxOut {
				value: 10000000, script_pubkey: Builder::new().into_script(),
			}]};
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but the channel will be configured to hold sending channel_ready and
		// broadcasting the funding transaction until the batch is ready.
		let res = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		);
		let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// `WAITING_FOR_BATCH` is cleared only when `set_batch_ready` is called by the
		// `ChannelManager`, i.e. once every channel in the batch is ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}